hledger-lib-1.2/Hledger/0000755000000000000000000000000013067102102013262 5ustar0000000000000000hledger-lib-1.2/Hledger/Data/0000755000000000000000000000000013066746043014154 5ustar0000000000000000hledger-lib-1.2/Hledger/Read/0000755000000000000000000000000013067102102014135 5ustar0000000000000000hledger-lib-1.2/Hledger/Reports/0000755000000000000000000000000013067565677014756 5ustar0000000000000000hledger-lib-1.2/Hledger/Utils/0000755000000000000000000000000013066746043014403 5ustar0000000000000000hledger-lib-1.2/doc/0000755000000000000000000000000013067512403012466 5ustar0000000000000000hledger-lib-1.2/tests/0000755000000000000000000000000013067266175013077 5ustar0000000000000000hledger-lib-1.2/Hledger.hs0000644000000000000000000000066013035210046013622 0ustar0000000000000000module Hledger ( module X ,tests_Hledger ) where import Test.HUnit import Hledger.Data as X import Hledger.Query as X import Hledger.Read as X hiding (samplejournal) import Hledger.Reports as X import Hledger.Utils as X tests_Hledger = TestList [ tests_Hledger_Data ,tests_Hledger_Query ,tests_Hledger_Read ,tests_Hledger_Reports ] hledger-lib-1.2/Hledger/Data.hs0000644000000000000000000000372413066173044014511 0ustar0000000000000000{-| The Hledger.Data library allows parsing and querying of C++ ledger-style journal files. It generally provides a compatible subset of C++ ledger's functionality. This package re-exports all the Hledger.Data.* modules (except UTF8, which requires an explicit import.) -} module Hledger.Data ( module Hledger.Data.Account, module Hledger.Data.AccountName, module Hledger.Data.Amount, module Hledger.Data.Commodity, module Hledger.Data.Dates, module Hledger.Data.Journal, module Hledger.Data.Ledger, module Hledger.Data.MarketPrice, module Hledger.Data.Period, module Hledger.Data.Posting, module Hledger.Data.RawOptions, module Hledger.Data.StringFormat, module Hledger.Data.Timeclock, module Hledger.Data.Transaction, module Hledger.Data.Types, tests_Hledger_Data ) where import Test.HUnit import Hledger.Data.Account import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Commodity import Hledger.Data.Dates import Hledger.Data.Journal import Hledger.Data.Ledger import Hledger.Data.MarketPrice import Hledger.Data.Period import Hledger.Data.Posting import Hledger.Data.RawOptions import Hledger.Data.StringFormat import Hledger.Data.Timeclock import Hledger.Data.Transaction import Hledger.Data.Types tests_Hledger_Data :: Test tests_Hledger_Data = TestList [ tests_Hledger_Data_Account ,tests_Hledger_Data_AccountName ,tests_Hledger_Data_Amount ,tests_Hledger_Data_Commodity ,tests_Hledger_Data_Journal ,tests_Hledger_Data_MarketPrice ,tests_Hledger_Data_Ledger ,tests_Hledger_Data_Posting -- ,tests_Hledger_Data_RawOptions -- ,tests_Hledger_Data_StringFormat ,tests_Hledger_Data_Timeclock ,tests_Hledger_Data_Transaction -- ,tests_Hledger_Data_Types ] hledger-lib-1.2/Hledger/Data/Account.hs0000644000000000000000000001606313042200120016061 0ustar0000000000000000{-# LANGUAGE RecordWildCards, StandaloneDeriving, OverloadedStrings #-} {-| An 'Account' has a name, a list of subaccounts, an optional parent account, and subaccounting-excluding and -including balances. 
-} module Hledger.Data.Account where import Data.List import Data.Maybe import qualified Data.Map as M import Safe (headMay, lookupJustDef) import Test.HUnit import Text.Printf import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Posting() import Hledger.Data.Types import Hledger.Utils -- deriving instance Show Account instance Show Account where show Account{..} = printf "Account %s (boring:%s, postings:%d, ebalance:%s, ibalance:%s)" aname (if aboring then "y" else "n" :: String) anumpostings (showMixedAmount aebalance) (showMixedAmount aibalance) instance Eq Account where (==) a b = aname a == aname b -- quick equality test for speed -- and -- [ aname a == aname b -- -- , aparent a == aparent b -- avoid infinite recursion -- , asubs a == asubs b -- , aebalance a == aebalance b -- , aibalance a == aibalance b -- ] nullacct = Account { aname = "" , aparent = Nothing , asubs = [] , anumpostings = 0 , aebalance = nullmixedamt , aibalance = nullmixedamt , aboring = False } -- | Derive 1. an account tree and 2. each account's total exclusive -- and inclusive changes from a list of postings. -- This is the core of the balance command (and of *ledger). -- The accounts are returned as a list in flattened tree order, -- and also reference each other as a tree. -- (The first account is the root of the tree.) accountsFromPostings :: [Posting] -> [Account] accountsFromPostings ps = let acctamts = [(paccount p,pamount p) | p <- ps] grouped = groupBy (\a b -> fst a == fst b) $ sort $ acctamts counted = [(a, length acctamts) | acctamts@((a,_):_) <- grouped] summed = map (\as@((aname,_):_) -> (aname, sumStrict $ map snd as)) grouped -- always non-empty nametree = treeFromPaths $ map (expandAccountName . fst) summed acctswithnames = nameTreeToAccount "root" nametree acctswithnumps = mapAccounts setnumps acctswithnames where setnumps a = a{anumpostings=fromMaybe 0 $ lookup (aname a) counted} acctswithebals = mapAccounts setebalance acctswithnumps where setebalance a = a{aebalance=lookupJustDef nullmixedamt (aname a) summed} acctswithibals = sumAccounts acctswithebals acctswithparents = tieAccountParents acctswithibals acctsflattened = flattenAccounts acctswithparents in acctsflattened -- | Convert an AccountName tree to an Account tree nameTreeToAccount :: AccountName -> FastTree AccountName -> Account nameTreeToAccount rootname (T m) = nullacct{ aname=rootname, asubs=map (uncurry nameTreeToAccount) $ M.assocs m } -- | Tie the knot so all subaccounts' parents are set correctly. tieAccountParents :: Account -> Account tieAccountParents = tie Nothing where tie parent a@Account{..} = a' where a' = a{aparent=parent, asubs=map (tie (Just a')) asubs} -- | Get this account's parent accounts, from the nearest up to the root. parentAccounts :: Account -> [Account] parentAccounts Account{aparent=Nothing} = [] parentAccounts Account{aparent=Just a} = a:parentAccounts a -- | List the accounts at each level of the account tree. accountsLevels :: Account -> [[Account]] accountsLevels = takeWhile (not . null) . iterate (concatMap asubs) . (:[]) -- | Map a (non-tree-structure-modifying) function over this and sub accounts. mapAccounts :: (Account -> Account) -> Account -> Account mapAccounts f a = f a{asubs = map (mapAccounts f) $ asubs a} -- | Is the predicate true on any of this account or its subaccounts ? anyAccounts :: (Account -> Bool) -> Account -> Bool anyAccounts p a | p a = True | otherwise = any (anyAccounts p) $ asubs a -- | Add subaccount-inclusive balances to an account tree. 
sumAccounts :: Account -> Account sumAccounts a | null $ asubs a = a{aibalance=aebalance a} | otherwise = a{aibalance=ibal, asubs=subs} where subs = map sumAccounts $ asubs a ibal = sum $ aebalance a : map aibalance subs -- | Remove all subaccounts below a certain depth. clipAccounts :: Int -> Account -> Account clipAccounts 0 a = a{asubs=[]} clipAccounts d a = a{asubs=subs} where subs = map (clipAccounts (d-1)) $ asubs a -- | Remove subaccounts below the specified depth, aggregating their balance at the depth limit -- (accounts at the depth limit will have any sub-balances merged into their exclusive balance). clipAccountsAndAggregate :: Int -> [Account] -> [Account] clipAccountsAndAggregate d as = combined where clipped = [a{aname=clipOrEllipsifyAccountName d $ aname a} | a <- as] combined = [a{aebalance=sum (map aebalance same)} | same@(a:_) <- groupBy (\a1 a2 -> aname a1 == aname a2) clipped] {- test cases, assuming d=1: assets:cash 1 1 assets:checking 1 1 -> as: [assets:cash 1 1, assets:checking 1 1] clipped: [assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:cash 1 1 assets:checking 1 1 -> as: [assets 0 2, assets:cash 1 1, assets:checking 1 1] clipped: [assets 0 2, assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:bank 1 2 assets:bank:checking 1 1 -> as: [assets 0 2, assets:bank 1 2, assets:bank:checking 1 1] clipped: [assets 0 2, assets 1 2, assets 1 1] combined: [assets 2 2] -} -- | Remove all leaf accounts and subtrees matching a predicate. pruneAccounts :: (Account -> Bool) -> Account -> Maybe Account pruneAccounts p = headMay . prune where prune a | null prunedsubs = if p a then [] else [a'] | otherwise = [a'] where prunedsubs = concatMap prune $ asubs a a' = a{asubs=prunedsubs} -- | Flatten an account tree into a list, which is sometimes -- convenient. Note since accounts link to their parents/subs, the -- tree's structure remains intact and can still be used. It's a tree/list! flattenAccounts :: Account -> [Account] flattenAccounts a = squish a [] where squish a as = a : Prelude.foldr squish as (asubs a) -- | Filter an account tree (to a list). filterAccounts :: (Account -> Bool) -> Account -> [Account] filterAccounts p a | p a = a : concatMap (filterAccounts p) (asubs a) | otherwise = concatMap (filterAccounts p) (asubs a) -- | Search an account list by name. lookupAccount :: AccountName -> [Account] -> Maybe Account lookupAccount a = find ((==a).aname) -- debug helpers printAccounts :: Account -> IO () printAccounts = putStrLn . showAccounts showAccounts = unlines . map showAccountDebug . flattenAccounts showAccountsBoringFlag = unlines . map (show . aboring) . flattenAccounts showAccountDebug a = printf "%-25s %4s %4s %s" (aname a) (showMixedAmount $ aebalance a) (showMixedAmount $ aibalance a) (if aboring a then "b" else " " :: String) tests_Hledger_Data_Account = TestList [ ] hledger-lib-1.2/Hledger/Data/AccountName.hs0000644000000000000000000001774413066173044016715 0ustar0000000000000000{-# LANGUAGE NoMonomorphismRestriction, OverloadedStrings #-} {-| 'AccountName's are strings like @assets:cash:petty@, with multiple components separated by ':'. From a set of these we derive the account hierarchy. 
-} module Hledger.Data.AccountName where import Data.List import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Tree import Test.HUnit import Text.Printf import Hledger.Data.Types import Hledger.Utils acctsepchar :: Char acctsepchar = ':' acctsep :: Text acctsep = T.pack [acctsepchar] -- accountNameComponents :: AccountName -> [String] -- accountNameComponents = splitAtElement acctsepchar accountNameComponents :: AccountName -> [Text] accountNameComponents = T.splitOn acctsep accountNameFromComponents :: [Text] -> AccountName accountNameFromComponents = T.intercalate acctsep accountLeafName :: AccountName -> Text accountLeafName = last . accountNameComponents -- | Truncate all account name components but the last to two characters. accountSummarisedName :: AccountName -> Text accountSummarisedName a -- length cs > 1 = take 2 (head cs) ++ ":" ++ a' | length cs > 1 = (T.intercalate ":" (map (T.take 2) $ init cs)) <> ":" <> a' | otherwise = a' where cs = accountNameComponents a a' = accountLeafName a accountNameLevel :: AccountName -> Int accountNameLevel "" = 0 accountNameLevel a = T.length (T.filter (==acctsepchar) a) + 1 accountNameDrop :: Int -> AccountName -> AccountName accountNameDrop n = accountNameFromComponents . drop n . accountNameComponents -- | ["a:b:c","d:e"] -> ["a","a:b","a:b:c","d","d:e"] expandAccountNames :: [AccountName] -> [AccountName] expandAccountNames as = nub $ concatMap expandAccountName as -- | "a:b:c" -> ["a","a:b","a:b:c"] expandAccountName :: AccountName -> [AccountName] expandAccountName = map accountNameFromComponents . tail . inits . accountNameComponents -- | ["a:b:c","d:e"] -> ["a","d"] topAccountNames :: [AccountName] -> [AccountName] topAccountNames as = [a | a <- expandAccountNames as, accountNameLevel a == 1] parentAccountName :: AccountName -> AccountName parentAccountName = accountNameFromComponents . init . accountNameComponents parentAccountNames :: AccountName -> [AccountName] parentAccountNames a = parentAccountNames' $ parentAccountName a where parentAccountNames' "" = [] parentAccountNames' a = a : parentAccountNames' (parentAccountName a) -- | Is the first account a parent or other ancestor of (and not the same as) the second ? isAccountNamePrefixOf :: AccountName -> AccountName -> Bool isAccountNamePrefixOf = T.isPrefixOf . (<> acctsep) isSubAccountNameOf :: AccountName -> AccountName -> Bool s `isSubAccountNameOf` p = (p `isAccountNamePrefixOf` s) && (accountNameLevel s == (accountNameLevel p + 1)) -- | From a list of account names, select those which are direct -- subaccounts of the given account name. subAccountNamesFrom :: [AccountName] -> AccountName -> [AccountName] subAccountNamesFrom accts a = filter (`isSubAccountNameOf` a) accts -- | Convert a list of account names to a tree. accountNameTreeFrom :: [AccountName] -> Tree AccountName accountNameTreeFrom accts = Node "root" (accounttreesfrom (topAccountNames accts)) where accounttreesfrom :: [AccountName] -> [Tree AccountName] accounttreesfrom [] = [] accounttreesfrom as = [Node a (accounttreesfrom $ subs a) | a <- as] subs = subAccountNamesFrom (expandAccountNames accts) nullaccountnametree = Node "root" [] -- | Elide an account name to fit in the specified width. -- From the ledger 2.6 news: -- -- @ -- What Ledger now does is that if an account name is too long, it will -- start abbreviating the first parts of the account name down to two -- letters in length. 
If this results in a string that is still too -- long, the front will be elided -- not the end. For example: -- -- Expenses:Cash ; OK, not too long -- Ex:Wednesday:Cash ; "Expenses" was abbreviated to fit -- Ex:We:Afternoon:Cash ; "Expenses" and "Wednesday" abbreviated -- ; Expenses:Wednesday:Afternoon:Lunch:Snack:Candy:Chocolate:Cash -- ..:Af:Lu:Sn:Ca:Ch:Cash ; Abbreviated and elided! -- @ elideAccountName :: Int -> AccountName -> AccountName elideAccountName width s -- XXX special case for transactions register's multi-account pseudo-names | " (split)" `T.isSuffixOf` s = let names = T.splitOn ", " $ T.take (T.length s - 8) s widthpername = (max 0 (width - 8 - 2 * (max 1 (length names) - 1))) `div` length names in fitText Nothing (Just width) True False $ (<>" (split)") $ T.intercalate ", " $ [accountNameFromComponents $ elideparts widthpername [] $ accountNameComponents s' | s' <- names] | otherwise = fitText Nothing (Just width) True False $ accountNameFromComponents $ elideparts width [] $ accountNameComponents s where elideparts :: Int -> [Text] -> [Text] -> [Text] elideparts width done ss | textWidth (accountNameFromComponents $ done++ss) <= width = done++ss | length ss > 1 = elideparts width (done++[textTakeWidth 2 $ head ss]) (tail ss) | otherwise = done++ss -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is 0, returns the empty string. clipAccountName :: Int -> AccountName -> AccountName clipAccountName n = accountNameFromComponents . take n . accountNameComponents -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is 0, returns "...". clipOrEllipsifyAccountName :: Int -> AccountName -> AccountName clipOrEllipsifyAccountName 0 = const "..." clipOrEllipsifyAccountName n = accountNameFromComponents . take n . accountNameComponents -- | Escape an AccountName for use within a regular expression. -- >>> putStr $ escapeName "First?!#$*?$(*) !@^#*? %)*!@#" -- First\?!#\$\*\?\$\(\*\) !@\^#\*\? %\)\*!@# escapeName :: AccountName -> Regexp escapeName = regexReplaceBy "[[?+|()*\\\\^$]" ("\\" <>) . T.unpack -- | Convert an account name to a regular expression matching it and its subaccounts. accountNameToAccountRegex :: AccountName -> Regexp accountNameToAccountRegex "" = "" accountNameToAccountRegex a = printf "^%s(:|$)" (escapeName a) -- | Convert an account name to a regular expression matching it but not its subaccounts. accountNameToAccountOnlyRegex :: AccountName -> Regexp accountNameToAccountOnlyRegex "" = "" accountNameToAccountOnlyRegex a = printf "^%s$" $ escapeName a -- XXX pack -- | Convert an exact account-matching regular expression to a plain account name. accountRegexToAccountName :: Regexp -> AccountName accountRegexToAccountName = T.pack . regexReplace "^\\^(.*?)\\(:\\|\\$\\)$" "\\1" -- XXX pack -- | Does this string look like an exact account-matching regular expression ? 
isAccountRegex :: String -> Bool isAccountRegex s = take 1 s == "^" && (take 5 $ reverse s) == ")$|:(" tests_Hledger_Data_AccountName = TestList [ "accountNameTreeFrom" ~: do accountNameTreeFrom ["a"] `is` Node "root" [Node "a" []] accountNameTreeFrom ["a","b"] `is` Node "root" [Node "a" [], Node "b" []] accountNameTreeFrom ["a","a:b"] `is` Node "root" [Node "a" [Node "a:b" []]] accountNameTreeFrom ["a:b:c"] `is` Node "root" [Node "a" [Node "a:b" [Node "a:b:c" []]]] ,"expandAccountNames" ~: expandAccountNames ["assets:cash","assets:checking","expenses:vacation"] `is` ["assets","assets:cash","assets:checking","expenses","expenses:vacation"] ,"isAccountNamePrefixOf" ~: do "assets" `isAccountNamePrefixOf` "assets" `is` False "assets" `isAccountNamePrefixOf` "assets:bank" `is` True "assets" `isAccountNamePrefixOf` "assets:bank:checking" `is` True "my assets" `isAccountNamePrefixOf` "assets:bank" `is` False ,"isSubAccountNameOf" ~: do "assets" `isSubAccountNameOf` "assets" `is` False "assets:bank" `isSubAccountNameOf` "assets" `is` True "assets:bank:checking" `isSubAccountNameOf` "assets" `is` False "assets:bank" `isSubAccountNameOf` "my assets" `is` False ] hledger-lib-1.2/Hledger/Data/Amount.hs0000644000000000000000000006336213042200120015734 0ustar0000000000000000{-| A simple 'Amount' is some quantity of money, shares, or anything else. It has a (possibly null) 'CommoditySymbol' and a numeric quantity: @ $1 £-50 EUR 3.44 GOOG 500 1.5h 90 apples 0 @ It may also have an assigned 'Price', representing this amount's per-unit or total cost in a different commodity. If present, this is rendered like so: @ EUR 2 \@ $1.50 (unit price) EUR 2 \@\@ $3 (total price) @ A 'MixedAmount' is zero or more simple amounts, so can represent multiple commodities; this is the type most often used: @ 0 $50 + EUR 3 16h + $13.55 + AAPL 500 + 6 oranges @ When a mixed amount has been \"normalised\", it has no more than one amount in each commodity and no zero amounts; or it has just a single zero amount and no others. Limited arithmetic with simple and mixed amounts is supported, best used with similar amounts since it mostly ignores assigned prices and commodity exchange rates. -} {-# LANGUAGE CPP, StandaloneDeriving, RecordWildCards, OverloadedStrings #-} module Hledger.Data.Amount ( -- * Amount amount, nullamt, missingamt, num, usd, eur, gbp, hrs, at, (@@), amountWithCommodity, -- ** arithmetic costOfAmount, divideAmount, -- ** rendering amountstyle, showAmount, showAmountWithZeroCommodity, showAmountDebug, showAmountWithoutPrice, maxprecision, maxprecisionwithpoint, setAmountPrecision, withPrecision, canonicaliseAmount, -- * MixedAmount nullmixedamt, missingmixedamt, mixed, amounts, filterMixedAmount, filterMixedAmountByCommodity, normaliseMixedAmountSquashPricesForDisplay, normaliseMixedAmount, -- ** arithmetic costOfMixedAmount, divideMixedAmount, averageMixedAmounts, isNegativeAmount, isNegativeMixedAmount, isZeroAmount, isReallyZeroAmount, isZeroMixedAmount, isReallyZeroMixedAmount, isReallyZeroMixedAmountCost, -- ** rendering showMixedAmount, showMixedAmountOneLine, showMixedAmountDebug, showMixedAmountWithoutPrice, showMixedAmountOneLineWithoutPrice, showMixedAmountWithZeroCommodity, showMixedAmountWithPrecision, setMixedAmountPrecision, canonicaliseMixedAmount, -- * misc. 
ltraceamount, tests_Hledger_Data_Amount ) where import Data.Char (isDigit) import Data.Decimal (roundTo) import Data.Function (on) import Data.List import Data.Map (findWithDefault) import Data.Maybe -- import Data.Text (Text) import qualified Data.Text as T import Test.HUnit import Text.Printf import qualified Data.Map as M import Hledger.Data.Types import Hledger.Data.Commodity import Hledger.Utils deriving instance Show MarketPrice amountstyle = AmountStyle L False 0 (Just '.') Nothing ------------------------------------------------------------------------------- -- Amount instance Show Amount where show _a@Amount{..} -- debugLevel < 2 = showAmountWithoutPrice a -- debugLevel < 3 = showAmount a | debugLevel < 6 = printf "Amount {acommodity=%s, aquantity=%s, ..}" (show acommodity) (show aquantity) | otherwise = --showAmountDebug a printf "Amount {acommodity=%s, aquantity=%s, aprice=%s, astyle=%s}" (show acommodity) (show aquantity) (showPriceDebug aprice) (show astyle) instance Num Amount where abs a@Amount{aquantity=q} = a{aquantity=abs q} signum a@Amount{aquantity=q} = a{aquantity=signum q} fromInteger i = nullamt{aquantity=fromInteger i} negate a@Amount{aquantity=q} = a{aquantity=(-q)} (+) = similarAmountsOp (+) (-) = similarAmountsOp (-) (*) = similarAmountsOp (*) -- | The empty simple amount. amount, nullamt :: Amount amount = Amount{acommodity="", aquantity=0, aprice=NoPrice, astyle=amountstyle} nullamt = amount -- | A temporary value for parsed transactions which had no amount specified. missingamt :: Amount missingamt = amount{acommodity="AUTO"} -- Handy amount constructors for tests. -- usd/eur/gbp round their argument to a whole number of pennies/cents. num n = amount{acommodity="", aquantity=n} hrs n = amount{acommodity="h", aquantity=n, astyle=amountstyle{asprecision=2, ascommodityside=R}} usd n = amount{acommodity="$", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} eur n = amount{acommodity="€", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} gbp n = amount{acommodity="£", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} amt `at` priceamt = amt{aprice=UnitPrice priceamt} amt @@ priceamt = amt{aprice=TotalPrice priceamt} -- | Apply a binary arithmetic operator to two amounts, which should -- be in the same commodity if non-zero (warning, this is not checked). -- A zero result keeps the commodity of the second amount. -- The result's display style is that of the second amount, with -- precision set to the highest of either amount. -- Prices are ignored and discarded. -- Remember: the caller is responsible for ensuring both amounts have the same commodity. similarAmountsOp :: (Quantity -> Quantity -> Quantity) -> Amount -> Amount -> Amount similarAmountsOp op Amount{acommodity=_, aquantity=q1, astyle=AmountStyle{asprecision=p1}} Amount{acommodity=c2, aquantity=q2, astyle=s2@AmountStyle{asprecision=p2}} = -- trace ("a1:"++showAmountDebug a1) $ trace ("a2:"++showAmountDebug a2) $ traceWith (("= :"++).showAmountDebug) amount{acommodity=c2, aquantity=q1 `op` q2, astyle=s2{asprecision=max p1 p2}} -- c1==c2 || q1==0 || q2==0 = -- otherwise = error "tried to do simple arithmetic with amounts in different commodities" -- | Convert an amount to the specified commodity, ignoring and discarding -- any assigned prices and assuming an exchange rate of 1. amountWithCommodity :: CommoditySymbol -> Amount -> Amount amountWithCommodity c a = a{acommodity=c, aprice=NoPrice} -- | Convert an amount to the commodity of its assigned price, if any. 
Notes: -- -- - price amounts must be MixedAmounts with exactly one component Amount (or there will be a runtime error) XXX -- -- - price amounts should be positive, though this is not currently enforced costOfAmount :: Amount -> Amount costOfAmount a@Amount{aquantity=q, aprice=price} = case price of NoPrice -> a UnitPrice p@Amount{aquantity=pq} -> p{aquantity=pq * q} TotalPrice p@Amount{aquantity=pq} -> p{aquantity=pq * signum q} -- | Divide an amount's quantity by a constant. divideAmount :: Amount -> Quantity -> Amount divideAmount a@Amount{aquantity=q} d = a{aquantity=q/d} -- | Is this amount negative ? The price is ignored. isNegativeAmount :: Amount -> Bool isNegativeAmount Amount{aquantity=q} = q < 0 digits = "123456789" :: String -- | Does this amount appear to be zero when displayed with its given precision ? isZeroAmount :: Amount -> Bool isZeroAmount a -- a==missingamt = False | otherwise = (null . filter (`elem` digits) . showAmountWithoutPriceOrCommodity) a -- | Is this amount "really" zero, regardless of the display precision ? isReallyZeroAmount :: Amount -> Bool isReallyZeroAmount Amount{aquantity=q} = q == 0 -- | Get the string representation of an amount, based on its commodity's -- display settings except using the specified precision. showAmountWithPrecision :: Int -> Amount -> String showAmountWithPrecision p = showAmount . setAmountPrecision p -- | Set an amount's display precision. setAmountPrecision :: Int -> Amount -> Amount setAmountPrecision p a@Amount{astyle=s} = a{astyle=s{asprecision=p}} -- | Set an amount's display precision, flipped. withPrecision :: Amount -> Int -> Amount withPrecision = flip setAmountPrecision -- | Get a string representation of an amount for debugging, -- appropriate to the current debug level. 9 shows maximum detail. showAmountDebug :: Amount -> String showAmountDebug Amount{acommodity="AUTO"} = "(missing)" showAmountDebug Amount{..} = printf "Amount {acommodity=%s, aquantity=%s, aprice=%s, astyle=%s}" (show acommodity) (show aquantity) (showPriceDebug aprice) (show astyle) -- | Get the string representation of an amount, without any \@ price. showAmountWithoutPrice :: Amount -> String showAmountWithoutPrice a = showAmount a{aprice=NoPrice} -- | Get the string representation of an amount, without any price or commodity symbol. showAmountWithoutPriceOrCommodity :: Amount -> String showAmountWithoutPriceOrCommodity a = showAmount a{acommodity="", aprice=NoPrice} showPrice :: Price -> String showPrice NoPrice = "" showPrice (UnitPrice pa) = " @ " ++ showAmount pa showPrice (TotalPrice pa) = " @@ " ++ showAmount pa showPriceDebug :: Price -> String showPriceDebug NoPrice = "" showPriceDebug (UnitPrice pa) = " @ " ++ showAmountDebug pa showPriceDebug (TotalPrice pa) = " @@ " ++ showAmountDebug pa -- | Get the string representation of an amount, based on its -- commodity's display settings. String representations equivalent to -- zero are converted to just \"0\". The special "missing" amount is -- displayed as the empty string. 
showAmount :: Amount -> String showAmount = showAmountHelper False showAmountHelper :: Bool -> Amount -> String showAmountHelper _ Amount{acommodity="AUTO"} = "" showAmountHelper showzerocommodity a@(Amount{acommodity=c, aprice=p, astyle=AmountStyle{..}}) = case ascommodityside of L -> printf "%s%s%s%s" (T.unpack c') space quantity' price R -> printf "%s%s%s%s" quantity' space (T.unpack c') price where quantity = showamountquantity a displayingzero = null $ filter (`elem` digits) $ quantity (quantity',c') | displayingzero && not showzerocommodity = ("0","") | otherwise = (quantity, quoteCommoditySymbolIfNeeded c) space = if (not (T.null c') && ascommodityspaced) then " " else "" :: String price = showPrice p -- | Like showAmount, but show a zero amount's commodity if it has one. showAmountWithZeroCommodity :: Amount -> String showAmountWithZeroCommodity = showAmountHelper True -- | Get the string representation of the number part of of an amount, -- using the display settings from its commodity. showamountquantity :: Amount -> String showamountquantity Amount{aquantity=q, astyle=AmountStyle{asprecision=p, asdecimalpoint=mdec, asdigitgroups=mgrps}} = punctuatenumber (fromMaybe '.' mdec) mgrps $ qstr where -- isint n = fromIntegral (round n) == n qstr -- p == maxprecision && isint q = printf "%d" (round q::Integer) | p == maxprecisionwithpoint = show q | p == maxprecision = chopdotzero $ show q | otherwise = show $ roundTo (fromIntegral p) q -- | Replace a number string's decimal point with the specified character, -- and add the specified digit group separators. The last digit group will -- be repeated as needed. punctuatenumber :: Char -> Maybe DigitGroupStyle -> String -> String punctuatenumber dec mgrps s = sign ++ reverse (applyDigitGroupStyle mgrps (reverse int)) ++ frac'' where (sign,num) = break isDigit s (int,frac) = break (=='.') num frac' = dropWhile (=='.') frac frac'' | null frac' = "" | otherwise = dec:frac' applyDigitGroupStyle :: Maybe DigitGroupStyle -> String -> String applyDigitGroupStyle Nothing s = s applyDigitGroupStyle (Just (DigitGroups c gs)) s = addseps (repeatLast gs) s where addseps [] s = s addseps (g:gs) s | length s <= g = s | otherwise = let (part,rest) = splitAt g s in part ++ [c] ++ addseps gs rest repeatLast [] = [] repeatLast gs = init gs ++ repeat (last gs) chopdotzero str = reverse $ case reverse str of '0':'.':s -> s s -> s -- | For rendering: a special precision value which means show all available digits. maxprecision :: Int maxprecision = 999998 -- | For rendering: a special precision value which forces display of a decimal point. maxprecisionwithpoint :: Int maxprecisionwithpoint = 999999 -- like journalCanonicaliseAmounts -- | Canonicalise an amount's display style using the provided commodity style map. canonicaliseAmount :: M.Map CommoditySymbol AmountStyle -> Amount -> Amount canonicaliseAmount styles a@Amount{acommodity=c, astyle=s} = a{astyle=s'} where s' = findWithDefault s c styles ------------------------------------------------------------------------------- -- MixedAmount instance Show MixedAmount where show | debugLevel < 3 = intercalate "\\n" . lines . showMixedAmountWithoutPrice -- debugLevel < 6 = intercalate "\\n" . lines . 
showMixedAmount | otherwise = showMixedAmountDebug instance Num MixedAmount where fromInteger i = Mixed [fromInteger i] negate (Mixed as) = Mixed $ map negate as (+) (Mixed as) (Mixed bs) = normaliseMixedAmount $ Mixed $ as ++ bs (*) = error' "error, mixed amounts do not support multiplication" abs = error' "error, mixed amounts do not support abs" signum = error' "error, mixed amounts do not support signum" -- | The empty mixed amount. nullmixedamt :: MixedAmount nullmixedamt = Mixed [] -- | A temporary value for parsed transactions which had no amount specified. missingmixedamt :: MixedAmount missingmixedamt = Mixed [missingamt] -- | Convert amounts in various commodities into a normalised MixedAmount. mixed :: [Amount] -> MixedAmount mixed = normaliseMixedAmount . Mixed -- | Simplify a mixed amount's component amounts: -- -- * amounts in the same commodity are combined unless they have different prices or total prices -- -- * multiple zero amounts, all with the same non-null commodity, are replaced by just the last of them, preserving the commodity and amount style (all but the last zero amount are discarded) -- -- * multiple zero amounts with multiple commodities, or no commodities, are replaced by one commodity-less zero amount -- -- * an empty amount list is replaced by one commodity-less zero amount -- -- * the special "missing" mixed amount remains unchanged -- normaliseMixedAmount :: MixedAmount -> MixedAmount normaliseMixedAmount = normaliseHelper False normaliseHelper :: Bool -> MixedAmount -> MixedAmount normaliseHelper squashprices (Mixed as) | missingamt `elem` as = missingmixedamt -- missingamt should always be alone, but detect it even if not | null nonzeros = Mixed [newzero] | otherwise = Mixed nonzeros where newzero = case filter (/= "") (map acommodity zeros) of _:_ -> last zeros _ -> nullamt (zeros, nonzeros) = partition isReallyZeroAmount $ map sumSimilarAmountsUsingFirstPrice $ groupBy groupfn $ sortBy sortfn $ as sortfn | squashprices = compare `on` acommodity | otherwise = compare `on` \a -> (acommodity a, aprice a) groupfn | squashprices = (==) `on` acommodity | otherwise = \a1 a2 -> acommodity a1 == acommodity a2 && combinableprices a1 a2 combinableprices Amount{aprice=NoPrice} Amount{aprice=NoPrice} = True combinableprices Amount{aprice=UnitPrice p1} Amount{aprice=UnitPrice p2} = p1 == p2 combinableprices _ _ = False tests_normaliseMixedAmount = [ "normaliseMixedAmount" ~: do -- assertEqual "missing amount is discarded" (Mixed [nullamt]) (normaliseMixedAmount $ Mixed [usd 0, missingamt]) assertEqual "any missing amount means a missing mixed amount" missingmixedamt (normaliseMixedAmount $ Mixed [usd 0, missingamt]) assertEqual "unpriced same-commodity amounts are combined" (Mixed [usd 2]) (normaliseMixedAmount $ Mixed [usd 0, usd 2]) -- amounts with same unit price are combined normaliseMixedAmount (Mixed [usd 1 `at` eur 1, usd 1 `at` eur 1]) `is` Mixed [usd 2 `at` eur 1] -- amounts with different unit prices are not combined normaliseMixedAmount (Mixed [usd 1 `at` eur 1, usd 1 `at` eur 2]) `is` Mixed [usd 1 `at` eur 1, usd 1 `at` eur 2] -- amounts with total prices are not combined normaliseMixedAmount (Mixed [usd 1 @@ eur 1, usd 1 @@ eur 1]) `is` Mixed [usd 1 @@ eur 1, usd 1 @@ eur 1] ] -- | Like normaliseMixedAmount, but combine each commodity's amounts -- into just one by throwing away all prices except the first. This is -- only used as a rendering helper, and could show a misleading price. 
normaliseMixedAmountSquashPricesForDisplay :: MixedAmount -> MixedAmount normaliseMixedAmountSquashPricesForDisplay = normaliseHelper True tests_normaliseMixedAmountSquashPricesForDisplay = [ "normaliseMixedAmountSquashPricesForDisplay" ~: do normaliseMixedAmountSquashPricesForDisplay (Mixed []) `is` Mixed [nullamt] assertBool "" $ isZeroMixedAmount $ normaliseMixedAmountSquashPricesForDisplay (Mixed [usd 10 ,usd 10 @@ eur 7 ,usd (-10) ,usd (-10) @@ eur 7 ]) ] -- | Sum same-commodity amounts in a lossy way, applying the first -- price to the result and discarding any other prices. Only used as a -- rendering helper. sumSimilarAmountsUsingFirstPrice :: [Amount] -> Amount sumSimilarAmountsUsingFirstPrice [] = nullamt sumSimilarAmountsUsingFirstPrice as = (sumStrict as){aprice=aprice $ head as} -- -- | Sum same-commodity amounts. If there were different prices, set -- -- the price to a special marker indicating "various". Only used as a -- -- rendering helper. -- sumSimilarAmountsNotingPriceDifference :: [Amount] -> Amount -- sumSimilarAmountsNotingPriceDifference [] = nullamt -- sumSimilarAmountsNotingPriceDifference as = undefined -- | Get a mixed amount's component amounts. amounts :: MixedAmount -> [Amount] amounts (Mixed as) = as -- | Filter a mixed amount's component amounts by a predicate. filterMixedAmount :: (Amount -> Bool) -> MixedAmount -> MixedAmount filterMixedAmount p (Mixed as) = Mixed $ filter p as -- | Return an unnormalised MixedAmount containing exactly one Amount -- with the specified commodity and the quantity of that commodity -- found in the original. NB if Amount's quantity is zero it will be -- discarded next time the MixedAmount gets normalised. filterMixedAmountByCommodity :: CommoditySymbol -> MixedAmount -> MixedAmount filterMixedAmountByCommodity c (Mixed as) = Mixed as' where as' = case filter ((==c) . acommodity) as of [] -> [nullamt{acommodity=c}] as'' -> [sum as''] -- | Convert a mixed amount's component amounts to the commodity of their -- assigned price, if any. costOfMixedAmount :: MixedAmount -> MixedAmount costOfMixedAmount (Mixed as) = Mixed $ map costOfAmount as -- | Divide a mixed amount's quantities by a constant. divideMixedAmount :: MixedAmount -> Quantity -> MixedAmount divideMixedAmount (Mixed as) d = Mixed $ map (flip divideAmount d) as -- | Calculate the average of some mixed amounts. averageMixedAmounts :: [MixedAmount] -> MixedAmount averageMixedAmounts [] = 0 averageMixedAmounts as = sum as `divideMixedAmount` fromIntegral (length as) -- | Is this mixed amount negative, if it can be normalised to a single commodity ? isNegativeMixedAmount :: MixedAmount -> Maybe Bool isNegativeMixedAmount m = case as of [a] -> Just $ isNegativeAmount a _ -> Nothing where as = amounts $ normaliseMixedAmountSquashPricesForDisplay m -- | Does this mixed amount appear to be zero when displayed with its given precision ? isZeroMixedAmount :: MixedAmount -> Bool isZeroMixedAmount = all isZeroAmount . amounts . normaliseMixedAmountSquashPricesForDisplay -- | Is this mixed amount "really" zero ? See isReallyZeroAmount. isReallyZeroMixedAmount :: MixedAmount -> Bool isReallyZeroMixedAmount = all isReallyZeroAmount . amounts . normaliseMixedAmountSquashPricesForDisplay -- | Is this mixed amount "really" zero, after converting to cost -- commodities where possible ? isReallyZeroMixedAmountCost :: MixedAmount -> Bool isReallyZeroMixedAmountCost = isReallyZeroMixedAmount . 
costOfMixedAmount -- -- | MixedAmount derived Eq instance in Types.hs doesn't know that we -- -- want $0 = EUR0 = 0. Yet we don't want to drag all this code over there. -- -- For now, use this when cross-commodity zero equality is important. -- mixedAmountEquals :: MixedAmount -> MixedAmount -> Bool -- mixedAmountEquals a b = amounts a' == amounts b' || (isZeroMixedAmount a' && isZeroMixedAmount b') -- where a' = normaliseMixedAmountSquashPricesForDisplay a -- b' = normaliseMixedAmountSquashPricesForDisplay b -- | Get the string representation of a mixed amount, after -- normalising it to one amount per commodity. Assumes amounts have -- no or similar prices, otherwise this can show misleading prices. showMixedAmount :: MixedAmount -> String showMixedAmount = showMixedAmountHelper False False -- | Like showMixedAmount, but zero amounts are shown with their -- commodity if they have one. showMixedAmountWithZeroCommodity :: MixedAmount -> String showMixedAmountWithZeroCommodity = showMixedAmountHelper True False -- | Get the one-line string representation of a mixed amount. showMixedAmountOneLine :: MixedAmount -> String showMixedAmountOneLine = showMixedAmountHelper False True showMixedAmountHelper :: Bool -> Bool -> MixedAmount -> String showMixedAmountHelper showzerocommodity useoneline m = join $ map showamt $ amounts $ normaliseMixedAmountSquashPricesForDisplay m where join | useoneline = intercalate ", " | otherwise = vConcatRightAligned showamt | showzerocommodity = showAmountWithZeroCommodity | otherwise = showAmount -- | Compact labelled trace of a mixed amount, for debugging. ltraceamount :: String -> MixedAmount -> MixedAmount ltraceamount s = traceWith (((s ++ ": ") ++).showMixedAmount) -- | Set the display precision in the amount's commodities. setMixedAmountPrecision :: Int -> MixedAmount -> MixedAmount setMixedAmountPrecision p (Mixed as) = Mixed $ map (setAmountPrecision p) as -- | Get the string representation of a mixed amount, showing each of its -- component amounts with the specified precision, ignoring their -- commoditys' display precision settings. showMixedAmountWithPrecision :: Int -> MixedAmount -> String showMixedAmountWithPrecision p m = vConcatRightAligned $ map (showAmountWithPrecision p) $ amounts $ normaliseMixedAmountSquashPricesForDisplay m -- | Get an unambiguous string representation of a mixed amount for debugging. showMixedAmountDebug :: MixedAmount -> String showMixedAmountDebug m | m == missingmixedamt = "(missing)" | otherwise = printf "Mixed [%s]" as where as = intercalate "\n " $ map showAmountDebug $ amounts m -- | Get the string representation of a mixed amount, but without -- any \@ prices. showMixedAmountWithoutPrice :: MixedAmount -> String showMixedAmountWithoutPrice m = concat $ intersperse "\n" $ map showfixedwidth as where (Mixed as) = normaliseMixedAmountSquashPricesForDisplay $ stripPrices m stripPrices (Mixed as) = Mixed $ map stripprice as where stripprice a = a{aprice=NoPrice} width = maximum $ map (length . showAmount) as showfixedwidth = printf (printf "%%%ds" width) . showAmountWithoutPrice -- | Get the one-line string representation of a mixed amount, but without -- any \@ prices. 
showMixedAmountOneLineWithoutPrice :: MixedAmount -> String showMixedAmountOneLineWithoutPrice m = concat $ intersperse ", " $ map showAmountWithoutPrice as where (Mixed as) = normaliseMixedAmountSquashPricesForDisplay $ stripPrices m stripPrices (Mixed as) = Mixed $ map stripprice as where stripprice a = a{aprice=NoPrice} -- | Canonicalise a mixed amount's display styles using the provided commodity style map. canonicaliseMixedAmount :: M.Map CommoditySymbol AmountStyle -> MixedAmount -> MixedAmount canonicaliseMixedAmount styles (Mixed as) = Mixed $ map (canonicaliseAmount styles) as ------------------------------------------------------------------------------- -- misc tests_Hledger_Data_Amount = TestList $ tests_normaliseMixedAmount ++ tests_normaliseMixedAmountSquashPricesForDisplay ++ [ -- Amount "costOfAmount" ~: do costOfAmount (eur 1) `is` eur 1 costOfAmount (eur 2){aprice=UnitPrice $ usd 2} `is` usd 4 costOfAmount (eur 1){aprice=TotalPrice $ usd 2} `is` usd 2 costOfAmount (eur (-1)){aprice=TotalPrice $ usd 2} `is` usd (-2) ,"isZeroAmount" ~: do assertBool "" $ isZeroAmount $ amount assertBool "" $ isZeroAmount $ usd 0 ,"negating amounts" ~: do let a = usd 1 negate a `is` a{aquantity=(-1)} let b = (usd 1){aprice=UnitPrice $ eur 2} negate b `is` b{aquantity=(-1)} ,"adding amounts without prices" ~: do let a1 = usd 1.23 let a2 = usd (-1.23) let a3 = usd (-1.23) (a1 + a2) `is` usd 0 (a1 + a3) `is` usd 0 (a2 + a3) `is` usd (-2.46) (a3 + a3) `is` usd (-2.46) sum [a1,a2,a3,-a3] `is` usd 0 -- highest precision is preserved let ap1 = usd 1 `withPrecision` 1 ap3 = usd 1 `withPrecision` 3 (asprecision $ astyle $ sum [ap1,ap3]) `is` 3 (asprecision $ astyle $ sum [ap3,ap1]) `is` 3 -- adding different commodities assumes conversion rate 1 assertBool "" $ isZeroAmount (a1 - eur 1.23) ,"showAmount" ~: do showAmount (usd 0 + gbp 0) `is` "0" -- MixedAmount ,"adding mixed amounts to zero, the commodity and amount style are preserved" ~: do (sum $ map (Mixed . (:[])) [usd 1.25 ,usd (-1) `withPrecision` 3 ,usd (-0.25) ]) `is` Mixed [usd 0 `withPrecision` 3] ,"adding mixed amounts with total prices" ~: do (sum $ map (Mixed . (:[])) [usd 1 @@ eur 1 ,usd (-2) @@ eur 1 ]) `is` (Mixed [usd 1 @@ eur 1 ,usd (-2) @@ eur 1 ]) ,"showMixedAmount" ~: do showMixedAmount (Mixed [usd 1]) `is` "$1.00" showMixedAmount (Mixed [usd 1 `at` eur 2]) `is` "$1.00 @ €2.00" showMixedAmount (Mixed [usd 0]) `is` "0" showMixedAmount (Mixed []) `is` "0" showMixedAmount missingmixedamt `is` "" ,"showMixedAmountWithoutPrice" ~: do let a = usd 1 `at` eur 2 showMixedAmountWithoutPrice (Mixed [a]) `is` "$1.00" showMixedAmountWithoutPrice (Mixed [a, (-a)]) `is` "0" ] hledger-lib-1.2/Hledger/Data/Commodity.hs0000644000000000000000000000440013035210046016433 0ustar0000000000000000{-| A 'Commodity' is a symbol representing a currency or some other kind of thing we are tracking, and some display preferences that tell how to display 'Amount's of the commodity - is the symbol on the left or right, are thousands separated by comma, significant decimal places and so on. 
-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Commodity where import Data.List import Data.Maybe (fromMaybe) import Data.Monoid -- import Data.Text (Text) import qualified Data.Text as T import Test.HUnit -- import qualified Data.Map as M import Hledger.Data.Types import Hledger.Utils -- characters that may not be used in a non-quoted commodity symbol nonsimplecommoditychars = "0123456789-+.@;\n \"{}=" :: [Char] quoteCommoditySymbolIfNeeded s | any (`elem` nonsimplecommoditychars) (T.unpack s) = "\"" <> s <> "\"" | otherwise = s commodity = "" -- handy constructors for tests -- unknown = commodity -- usd = "$" -- eur = "€" -- gbp = "£" -- hour = "h" -- Some sample commodity' names and symbols, for use in tests.. commoditysymbols = [("unknown","") ,("usd","$") ,("eur","€") ,("gbp","£") ,("hour","h") ] -- | Look up one of the sample commodities' symbol by name. comm :: String -> CommoditySymbol comm name = snd $ fromMaybe (error' "commodity lookup failed") (find (\n -> fst n == name) commoditysymbols) -- | Find the conversion rate between two commodities. Currently returns 1. conversionRate :: CommoditySymbol -> CommoditySymbol -> Double conversionRate _ _ = 1 -- -- | Convert a list of commodities to a map from commodity symbols to -- -- unique, display-preference-canonicalised commodities. -- canonicaliseCommodities :: [CommoditySymbol] -> Map.Map String CommoditySymbol -- canonicaliseCommodities cs = -- Map.fromList [(s,firstc{precision=maxp}) | s <- symbols, -- let cs = commoditymap ! s, -- let firstc = head cs, -- let maxp = maximum $ map precision cs -- ] -- where -- commoditymap = Map.fromList [(s, commoditieswithsymbol s) | s <- symbols] -- commoditieswithsymbol s = filter ((s==) . symbol) cs -- symbols = nub $ map symbol cs tests_Hledger_Data_Commodity = TestList [ ] hledger-lib-1.2/Hledger/Data/Dates.hs0000644000000000000000000007042613035510426015547 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE NoMonoLocalBinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-| Date parsing and utilities for hledger. For date and time values, we use the standard Day and UTCTime types. A 'SmartDate' is a date which may be partially-specified or relative. Eg 2008\/12\/31, but also 2008\/12, 12\/31, tomorrow, last week, next year. We represent these as a triple of strings like (\"2008\",\"12\",\"\"), (\"\",\"\",\"tomorrow\"), (\"\",\"last\",\"week\"). A 'DateSpan' is the span of time between two specific calendar dates, or an open-ended span where one or both dates are unspecified. (A date span with both ends unspecified matches all dates.) An 'Interval' is ledger's \"reporting interval\" - weekly, monthly, quarterly, etc. 'Period' will probably replace DateSpan in due course. -} -- XXX fromGregorian silently clips bad dates, use fromGregorianValid instead ? 
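-- For illustration of that clipping (standard Data.Time.Calendar behaviour,
-- not part of this module's API):
--
--   fromGregorian 2008 2 30       -- silently clipped to 2008-02-29
--   fromGregorianValid 2008 2 30  -- Nothing, so invalid dates can be rejected explicitly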
module Hledger.Data.Dates ( -- * Misc date handling utilities getCurrentDay, getCurrentMonth, getCurrentYear, nulldate, spanContainsDate, periodContainsDate, parsedateM, parsedate, showDate, showDateSpan, elapsedSeconds, prevday, parsePeriodExpr, nulldatespan, failIfInvalidYear, failIfInvalidMonth, failIfInvalidDay, datesepchar, datesepchars, spanStart, spanEnd, spansSpan, spanIntersect, spansIntersect, spanDefaultsFrom, spanUnion, spansUnion, smartdate, splitSpan, fixSmartDate, fixSmartDateStr, fixSmartDateStrEither, fixSmartDateStrEither', daysInSpan, maybePeriod, mkdatespan, ) where import Prelude () import Prelude.Compat import Control.Monad import Data.List.Compat import Data.Maybe import Data.Text (Text) import qualified Data.Text as T #if MIN_VERSION_time(1,5,0) import Data.Time.Format hiding (months) #else import Data.Time.Format import System.Locale (TimeLocale, defaultTimeLocale) #endif import Data.Time.Calendar import Data.Time.Calendar.OrdinalDate import Data.Time.Clock import Data.Time.LocalTime import Safe (headMay, lastMay, readMay) import Text.Megaparsec import Text.Megaparsec.Text import Text.Printf import Hledger.Data.Types import Hledger.Data.Period import Hledger.Utils -- Help ppShow parse and line-wrap DateSpans better in debug output. instance Show DateSpan where show s = "DateSpan " ++ showDateSpan s -- show s = "DateSpan \"" ++ showDateSpan s ++ "\"" -- quotes to help pretty-show showDate :: Day -> String showDate = formatTime defaultTimeLocale "%0C%y/%m/%d" -- | Render a datespan as a display string, abbreviating into a -- compact form if possible. showDateSpan :: DateSpan -> String showDateSpan = showPeriod . dateSpanAsPeriod -- | Get the current local date. getCurrentDay :: IO Day getCurrentDay = do t <- getZonedTime return $ localDay (zonedTimeToLocalTime t) -- | Get the current local month number. getCurrentMonth :: IO Int getCurrentMonth = do (_,m,_) <- toGregorian `fmap` getCurrentDay return m -- | Get the current local year. getCurrentYear :: IO Integer getCurrentYear = do (y,_,_) <- toGregorian `fmap` getCurrentDay return y elapsedSeconds :: Fractional a => UTCTime -> UTCTime -> a elapsedSeconds t1 = realToFrac . diffUTCTime t1 spanStart :: DateSpan -> Maybe Day spanStart (DateSpan d _) = d spanEnd :: DateSpan -> Maybe Day spanEnd (DateSpan _ d) = d -- might be useful later: http://en.wikipedia.org/wiki/Allen%27s_interval_algebra -- | Get overall span enclosing multiple sequentially ordered spans. spansSpan :: [DateSpan] -> DateSpan spansSpan spans = DateSpan (maybe Nothing spanStart $ headMay spans) (maybe Nothing spanEnd $ lastMay spans) -- | Split a DateSpan into one or more consecutive whole spans of the specified length which enclose it. -- If no interval is specified, the original span is returned. 
-- -- ==== Examples: -- >>> let t i d1 d2 = splitSpan i $ mkdatespan d1 d2 -- >>> t NoInterval "2008/01/01" "2009/01/01" -- [DateSpan 2008] -- >>> t (Quarters 1) "2008/01/01" "2009/01/01" -- [DateSpan 2008q1,DateSpan 2008q2,DateSpan 2008q3,DateSpan 2008q4] -- >>> splitSpan (Quarters 1) nulldatespan -- [DateSpan -] -- >>> t (Days 1) "2008/01/01" "2008/01/01" -- an empty datespan -- [DateSpan 2008/01/01-2007/12/31] -- >>> t (Quarters 1) "2008/01/01" "2008/01/01" -- [DateSpan 2008/01/01-2007/12/31] -- >>> t (Months 1) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01,DateSpan 2008/02,DateSpan 2008/03] -- >>> t (Months 2) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01/01-2008/02/29,DateSpan 2008/03/01-2008/04/30] -- >>> t (Weeks 1) "2008/01/01" "2008/01/15" -- [DateSpan 2007/12/31w01,DateSpan 2008/01/07w02,DateSpan 2008/01/14w03] -- >>> t (Weeks 2) "2008/01/01" "2008/01/15" -- [DateSpan 2007/12/31-2008/01/13,DateSpan 2008/01/14-2008/01/27] -- >>> t (DayOfMonth 2) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01/02-2008/02/01,DateSpan 2008/02/02-2008/03/01,DateSpan 2008/03/02-2008/04/01] -- >>> t (DayOfWeek 2) "2011/01/01" "2011/01/15" -- [DateSpan 2011/01/04-2011/01/10,DateSpan 2011/01/11-2011/01/17] -- splitSpan :: Interval -> DateSpan -> [DateSpan] splitSpan _ (DateSpan Nothing Nothing) = [DateSpan Nothing Nothing] splitSpan NoInterval s = [s] splitSpan (Days n) s = splitspan startofday (applyN n nextday) s splitSpan (Weeks n) s = splitspan startofweek (applyN n nextweek) s splitSpan (Months n) s = splitspan startofmonth (applyN n nextmonth) s splitSpan (Quarters n) s = splitspan startofquarter (applyN n nextquarter) s splitSpan (Years n) s = splitspan startofyear (applyN n nextyear) s splitSpan (DayOfMonth n) s = splitspan (nthdayofmonthcontaining n) (applyN (n-1) nextday . nextmonth) s splitSpan (DayOfWeek n) s = splitspan (nthdayofweekcontaining n) (applyN (n-1) nextday . nextweek) s -- splitSpan (WeekOfYear n) s = splitspan startofweek (applyN n nextweek) s -- splitSpan (MonthOfYear n) s = splitspan startofmonth (applyN n nextmonth) s -- splitSpan (QuarterOfYear n) s = splitspan startofquarter (applyN n nextquarter) s -- Split the given span using the provided helper functions: -- start is applied to the span's start date to get the first sub-span's start date -- next is applied to a sub-span's start date to get the next sub-span's start date splitspan :: (Day -> Day) -> (Day -> Day) -> DateSpan -> [DateSpan] splitspan _ _ (DateSpan Nothing Nothing) = [] splitspan start next (DateSpan Nothing (Just e)) = splitspan start next (DateSpan (Just $ start e) (Just $ next $ start e)) splitspan start next (DateSpan (Just s) Nothing) = splitspan start next (DateSpan (Just $ start s) (Just $ next $ start s)) splitspan start next span@(DateSpan (Just s) (Just e)) | s == e = [span] | otherwise = splitspan' start next span where splitspan' start next (DateSpan (Just s) (Just e)) | s >= e = [] | otherwise = DateSpan (Just subs) (Just sube) : splitspan' start next (DateSpan (Just sube) (Just e)) where subs = start s sube = next subs splitspan' _ _ _ = error' "won't happen, avoids warnings" -- | Count the days in a DateSpan, or if it is open-ended return Nothing. daysInSpan :: DateSpan -> Maybe Integer daysInSpan (DateSpan (Just d1) (Just d2)) = Just $ diffDays d2 d1 daysInSpan _ = Nothing -- | Does the span include the given date ? 
spanContainsDate :: DateSpan -> Day -> Bool spanContainsDate (DateSpan Nothing Nothing) _ = True spanContainsDate (DateSpan Nothing (Just e)) d = d < e spanContainsDate (DateSpan (Just b) Nothing) d = d >= b spanContainsDate (DateSpan (Just b) (Just e)) d = d >= b && d < e -- | Does the period include the given date ? -- (Here to avoid import cycle). periodContainsDate :: Period -> Day -> Bool periodContainsDate p = spanContainsDate (periodAsDateSpan p) -- | Calculate the intersection of a number of datespans. spansIntersect [] = nulldatespan spansIntersect [d] = d spansIntersect (d:ds) = d `spanIntersect` (spansIntersect ds) -- | Calculate the intersection of two datespans. spanIntersect (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = latest b1 b2 e = earliest e1 e2 -- | Fill any unspecified dates in the first span with the dates from -- the second one. Sort of a one-way spanIntersect. spanDefaultsFrom (DateSpan a1 b1) (DateSpan a2 b2) = DateSpan a b where a = if isJust a1 then a1 else a2 b = if isJust b1 then b1 else b2 -- | Calculate the union of a number of datespans. spansUnion [] = nulldatespan spansUnion [d] = d spansUnion (d:ds) = d `spanUnion` (spansUnion ds) -- | Calculate the union of two datespans. spanUnion (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = earliest b1 b2 e = latest e1 e2 latest d Nothing = d latest Nothing d = d latest (Just d1) (Just d2) = Just $ max d1 d2 earliest d Nothing = d earliest Nothing d = d earliest (Just d1) (Just d2) = Just $ min d1 d2 -- | Parse a period expression to an Interval and overall DateSpan using -- the provided reference date, or return a parse error. parsePeriodExpr :: Day -> Text -> Either (ParseError Char Dec) (Interval, DateSpan) parsePeriodExpr refdate = parsewith (periodexpr refdate <* eof) maybePeriod :: Day -> Text -> Maybe (Interval,DateSpan) maybePeriod refdate = either (const Nothing) Just . parsePeriodExpr refdate -- | Show a DateSpan as a human-readable pseudo-period-expression string. -- dateSpanAsText :: DateSpan -> String -- dateSpanAsText (DateSpan Nothing Nothing) = "all" -- dateSpanAsText (DateSpan Nothing (Just e)) = printf "to %s" (show e) -- dateSpanAsText (DateSpan (Just b) Nothing) = printf "from %s" (show b) -- dateSpanAsText (DateSpan (Just b) (Just e)) = printf "%s to %s" (show b) (show e) -- | Convert a single smart date string to a date span using the provided -- reference date, or raise an error. 
-- spanFromSmartDateString :: Day -> String -> DateSpan -- spanFromSmartDateString refdate s = spanFromSmartDate refdate sdate -- where -- sdate = fromparse $ parsewith smartdateonly s spanFromSmartDate :: Day -> SmartDate -> DateSpan spanFromSmartDate refdate sdate = DateSpan (Just b) (Just e) where (ry,rm,_) = toGregorian refdate (b,e) = span sdate span :: SmartDate -> (Day,Day) span ("","","today") = (refdate, nextday refdate) span ("","this","day") = (refdate, nextday refdate) span ("","","yesterday") = (prevday refdate, refdate) span ("","last","day") = (prevday refdate, refdate) span ("","","tomorrow") = (nextday refdate, addDays 2 refdate) span ("","next","day") = (nextday refdate, addDays 2 refdate) span ("","last","week") = (prevweek refdate, thisweek refdate) span ("","this","week") = (thisweek refdate, nextweek refdate) span ("","next","week") = (nextweek refdate, startofweek $ addDays 14 refdate) span ("","last","month") = (prevmonth refdate, thismonth refdate) span ("","this","month") = (thismonth refdate, nextmonth refdate) span ("","next","month") = (nextmonth refdate, startofmonth $ addGregorianMonthsClip 2 refdate) span ("","last","quarter") = (prevquarter refdate, thisquarter refdate) span ("","this","quarter") = (thisquarter refdate, nextquarter refdate) span ("","next","quarter") = (nextquarter refdate, startofquarter $ addGregorianMonthsClip 6 refdate) span ("","last","year") = (prevyear refdate, thisyear refdate) span ("","this","year") = (thisyear refdate, nextyear refdate) span ("","next","year") = (nextyear refdate, startofyear $ addGregorianYearsClip 2 refdate) span ("","",d) = (day, nextday day) where day = fromGregorian ry rm (read d) span ("",m,"") = (startofmonth day, nextmonth day) where day = fromGregorian ry (read m) 1 span ("",m,d) = (day, nextday day) where day = fromGregorian ry (read m) (read d) span (y,"","") = (startofyear day, nextyear day) where day = fromGregorian (read y) 1 1 span (y,m,"") = (startofmonth day, nextmonth day) where day = fromGregorian (read y) (read m) 1 span (y,m,d) = (day, nextday day) where day = fromGregorian (read y) (read m) (read d) -- showDay :: Day -> String -- showDay day = printf "%04d/%02d/%02d" y m d where (y,m,d) = toGregorian day -- | Convert a smart date string to an explicit yyyy\/mm\/dd string using -- the provided reference date, or raise an error. fixSmartDateStr :: Day -> Text -> String fixSmartDateStr d s = either (\e->error' $ printf "could not parse date %s %s" (show s) (show e)) id $ (fixSmartDateStrEither d s :: Either (ParseError Char Dec) String) -- | A safe version of fixSmartDateStr. fixSmartDateStrEither :: Day -> Text -> Either (ParseError Char Dec) String fixSmartDateStrEither d = either Left (Right . showDate) . fixSmartDateStrEither' d fixSmartDateStrEither' :: Day -> Text -> Either (ParseError Char Dec) Day fixSmartDateStrEither' d s = case parsewith smartdateonly (T.toLower s) of Right sd -> Right $ fixSmartDate d sd Left e -> Left e -- | Convert a SmartDate to an absolute date using the provided reference date. 
-- -- ==== Examples: -- >>> :set -XOverloadedStrings -- >>> let t = fixSmartDateStr (parsedate "2008/11/26") -- >>> t "0000-01-01" -- "0000/01/01" -- >>> t "1999-12-02" -- "1999/12/02" -- >>> t "1999.12.02" -- "1999/12/02" -- >>> t "1999/3/2" -- "1999/03/02" -- >>> t "19990302" -- "1999/03/02" -- >>> t "2008/2" -- "2008/02/01" -- >>> t "0020/2" -- "0020/02/01" -- >>> t "1000" -- "1000/01/01" -- >>> t "4/2" -- "2008/04/02" -- >>> t "2" -- "2008/11/02" -- >>> t "January" -- "2008/01/01" -- >>> t "feb" -- "2008/02/01" -- >>> t "today" -- "2008/11/26" -- >>> t "yesterday" -- "2008/11/25" -- >>> t "tomorrow" -- "2008/11/27" -- >>> t "this day" -- "2008/11/26" -- >>> t "last day" -- "2008/11/25" -- >>> t "next day" -- "2008/11/27" -- >>> t "this week" -- last monday -- "2008/11/24" -- >>> t "last week" -- previous monday -- "2008/11/17" -- >>> t "next week" -- next monday -- "2008/12/01" -- >>> t "this month" -- "2008/11/01" -- >>> t "last month" -- "2008/10/01" -- >>> t "next month" -- "2008/12/01" -- >>> t "this quarter" -- "2008/10/01" -- >>> t "last quarter" -- "2008/07/01" -- >>> t "next quarter" -- "2009/01/01" -- >>> t "this year" -- "2008/01/01" -- >>> t "last year" -- "2007/01/01" -- >>> t "next year" -- "2009/01/01" -- -- t "last wed" -- "2008/11/19" -- t "next friday" -- "2008/11/28" -- t "next january" -- "2009/01/01" -- fixSmartDate :: Day -> SmartDate -> Day fixSmartDate refdate sdate = fix sdate where fix :: SmartDate -> Day fix ("","","today") = fromGregorian ry rm rd fix ("","this","day") = fromGregorian ry rm rd fix ("","","yesterday") = prevday refdate fix ("","last","day") = prevday refdate fix ("","","tomorrow") = nextday refdate fix ("","next","day") = nextday refdate fix ("","last","week") = prevweek refdate fix ("","this","week") = thisweek refdate fix ("","next","week") = nextweek refdate fix ("","last","month") = prevmonth refdate fix ("","this","month") = thismonth refdate fix ("","next","month") = nextmonth refdate fix ("","last","quarter") = prevquarter refdate fix ("","this","quarter") = thisquarter refdate fix ("","next","quarter") = nextquarter refdate fix ("","last","year") = prevyear refdate fix ("","this","year") = thisyear refdate fix ("","next","year") = nextyear refdate fix ("","",d) = fromGregorian ry rm (read d) fix ("",m,"") = fromGregorian ry (read m) 1 fix ("",m,d) = fromGregorian ry (read m) (read d) fix (y,"","") = fromGregorian (read y) 1 1 fix (y,m,"") = fromGregorian (read y) (read m) 1 fix (y,m,d) = fromGregorian (read y) (read m) (read d) (ry,rm,rd) = toGregorian refdate prevday :: Day -> Day prevday = addDays (-1) nextday = addDays 1 startofday = id thisweek = startofweek prevweek = startofweek . addDays (-7) nextweek = startofweek . addDays 7 startofweek day = fromMondayStartWeek y w 1 where (y,_,_) = toGregorian day (w,_) = mondayStartWeek day thismonth = startofmonth prevmonth = startofmonth . addGregorianMonthsClip (-1) nextmonth = startofmonth . addGregorianMonthsClip 1 startofmonth day = fromGregorian y m 1 where (y,m,_) = toGregorian day thisquarter = startofquarter prevquarter = startofquarter . addGregorianMonthsClip (-3) nextquarter = startofquarter . addGregorianMonthsClip 3 startofquarter day = fromGregorian y (firstmonthofquarter m) 1 where (y,m,_) = toGregorian day firstmonthofquarter m = ((m-1) `div` 3) * 3 + 1 thisyear = startofyear prevyear = startofyear . addGregorianYearsClip (-1) nextyear = startofyear . 
addGregorianYearsClip 1 startofyear day = fromGregorian y 1 1 where (y,_,_) = toGregorian day nthdayofmonthcontaining n d | d1 >= d = d1 | otherwise = d2 where d1 = addDays (fromIntegral n-1) s d2 = addDays (fromIntegral n-1) $ nextmonth s s = startofmonth d nthdayofweekcontaining n d | d1 >= d = d1 | otherwise = d2 where d1 = addDays (fromIntegral n-1) s d2 = addDays (fromIntegral n-1) $ nextweek s s = startofweek d ---------------------------------------------------------------------- -- parsing -- -- | Parse a couple of date-time string formats to a time type. -- parsedatetimeM :: String -> Maybe LocalTime -- parsedatetimeM s = firstJust [ -- parseTime defaultTimeLocale "%Y/%m/%d %H:%M:%S" s, -- parseTime defaultTimeLocale "%Y-%m-%d %H:%M:%S" s -- ] parsetime :: ParseTime t => TimeLocale -> String -> String -> Maybe t parsetime = #if MIN_VERSION_time(1,5,0) parseTimeM True #else parseTime #endif -- | Parse a couple of date string formats to a time type. parsedateM :: String -> Maybe Day parsedateM s = firstJust [ parsetime defaultTimeLocale "%Y/%m/%d" s, parsetime defaultTimeLocale "%Y-%m-%d" s ] -- -- | Parse a date-time string to a time type, or raise an error. -- parsedatetime :: String -> LocalTime -- parsedatetime s = fromMaybe (error' $ "could not parse timestamp \"" ++ s ++ "\"") -- (parsedatetimeM s) -- | Parse a YYYY-MM-DD or YYYY/MM/DD date string to a Day, or raise an error. For testing/debugging. -- -- >>> parsedate "2008/02/03" -- 2008-02-03 parsedate :: String -> Day parsedate s = fromMaybe (error' $ "could not parse date \"" ++ s ++ "\"") (parsedateM s) -- doctests I haven't been able to make compatible with both GHC 7 and 8 -- -- >>> parsedate "2008/02/03/" -- -- *** Exception: could not parse date "2008/02/03/" -- #if MIN_VERSION_base(4,9,0) -- -- ... -- #endif -- #if MIN_VERSION_time(1,6,0) -- -- >>> parsedate "2008/02/30" -- with time >= 1.6, invalid dates are rejected -- -- *** Exception: could not parse date "2008/02/30" -- #if MIN_VERSION_base(4,9,0) -- -- ... -- #endif -- #else -- -- >>> parsedate "2008/02/30" -- with time < 1.6, they are silently adjusted -- -- 2008-02-29 -- #endif -- | Parse a time string to a time type using the provided pattern, or -- return the default. _parsetimewith :: ParseTime t => String -> String -> t -> t _parsetimewith pat s def = fromMaybe def $ parsetime defaultTimeLocale pat s {-| Parse a date in any of the formats allowed in ledger's period expressions, and maybe some others: > 2004 > 2004/10 > 2004/10/1 > 10/1 > 21 > october, oct > yesterday, today, tomorrow > this/next/last week/day/month/quarter/year Returns a SmartDate, to be converted to a full date later (see fixSmartDate). Assumes any text in the parse stream has been lowercased. -} smartdate :: Parser SmartDate smartdate = do -- XXX maybe obscures date errors ? see ledgerdate (y,m,d) <- choice' [yyyymmdd, ymd, ym, md, y, d, month, mon, today, yesterday, tomorrow, lastthisnextthing] return (y,m,d) -- | Like smartdate, but there must be nothing other than whitespace after the date. smartdateonly :: Parser SmartDate smartdateonly = do d <- smartdate many spacenonewline eof return d datesepchars :: [Char] datesepchars = "/-." 
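-- A minimal usage sketch for the date parsing helpers above:
--
-- >>> parsedateM "2008/02/03"
-- Just 2008-02-03
-- >>> parsedateM "2008-02-03"
-- Just 2008-02-03
-- >>> parsedateM "3.2.2008"
-- Nothing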
datesepchar :: TextParser m Char datesepchar = oneOf datesepchars validYear, validMonth, validDay :: String -> Bool validYear s = length s >= 4 && isJust (readMay s :: Maybe Year) validMonth s = maybe False (\n -> n>=1 && n<=12) $ readMay s validDay s = maybe False (\n -> n>=1 && n<=31) $ readMay s failIfInvalidYear, failIfInvalidMonth, failIfInvalidDay :: (Monad m) => String -> m () failIfInvalidYear s = unless (validYear s) $ fail $ "bad year number: " ++ s failIfInvalidMonth s = unless (validMonth s) $ fail $ "bad month number: " ++ s failIfInvalidDay s = unless (validDay s) $ fail $ "bad day number: " ++ s yyyymmdd :: Parser SmartDate yyyymmdd = do y <- count 4 digitChar m <- count 2 digitChar failIfInvalidMonth m d <- count 2 digitChar failIfInvalidDay d return (y,m,d) ymd :: Parser SmartDate ymd = do y <- some digitChar failIfInvalidYear y sep <- datesepchar m <- some digitChar failIfInvalidMonth m char sep d <- some digitChar failIfInvalidDay d return $ (y,m,d) ym :: Parser SmartDate ym = do y <- some digitChar failIfInvalidYear y datesepchar m <- some digitChar failIfInvalidMonth m return (y,m,"") y :: Parser SmartDate y = do y <- some digitChar failIfInvalidYear y return (y,"","") d :: Parser SmartDate d = do d <- some digitChar failIfInvalidDay d return ("","",d) md :: Parser SmartDate md = do m <- some digitChar failIfInvalidMonth m datesepchar d <- some digitChar failIfInvalidDay d return ("",m,d) months = ["january","february","march","april","may","june", "july","august","september","october","november","december"] monthabbrevs = ["jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"] -- weekdays = ["monday","tuesday","wednesday","thursday","friday","saturday","sunday"] -- weekdayabbrevs = ["mon","tue","wed","thu","fri","sat","sun"] monthIndex s = maybe 0 (+1) $ lowercase s `elemIndex` months monIndex s = maybe 0 (+1) $ lowercase s `elemIndex` monthabbrevs month :: Parser SmartDate month = do m <- choice $ map (try . string) months let i = monthIndex m return ("",show i,"") mon :: Parser SmartDate mon = do m <- choice $ map (try . 
string) monthabbrevs let i = monIndex m return ("",show i,"") today,yesterday,tomorrow :: Parser SmartDate today = string "today" >> return ("","","today") yesterday = string "yesterday" >> return ("","","yesterday") tomorrow = string "tomorrow" >> return ("","","tomorrow") lastthisnextthing :: Parser SmartDate lastthisnextthing = do r <- choice [ string "last" ,string "this" ,string "next" ] many spacenonewline -- make the space optional for easier scripting p <- choice [ string "day" ,string "week" ,string "month" ,string "quarter" ,string "year" ] -- XXX support these in fixSmartDate -- ++ (map string $ months ++ monthabbrevs ++ weekdays ++ weekdayabbrevs) return ("",r,p) -- | -- >>> let p = parsewith (periodexpr (parsedate "2008/11/26")) :: T.Text -> Either (ParseError Char Dec) (Interval, DateSpan) -- >>> p "from aug to oct" -- Right (NoInterval,DateSpan 2008/08/01-2008/09/30) -- >>> p "aug to oct" -- Right (NoInterval,DateSpan 2008/08/01-2008/09/30) -- >>> p "every 3 days in aug" -- Right (Days 3,DateSpan 2008/08) -- >>> p "daily from aug" -- Right (Days 1,DateSpan 2008/08/01-) -- >>> p "every week to 2009" -- Right (Weeks 1,DateSpan -2008/12/31) periodexpr :: Day -> Parser (Interval, DateSpan) periodexpr rdate = choice $ map try [ intervalanddateperiodexpr rdate, intervalperiodexpr, dateperiodexpr rdate, (return (NoInterval,DateSpan Nothing Nothing)) ] intervalanddateperiodexpr :: Day -> Parser (Interval, DateSpan) intervalanddateperiodexpr rdate = do many spacenonewline i <- reportinginterval many spacenonewline s <- periodexprdatespan rdate return (i,s) intervalperiodexpr :: Parser (Interval, DateSpan) intervalperiodexpr = do many spacenonewline i <- reportinginterval return (i, DateSpan Nothing Nothing) dateperiodexpr :: Day -> Parser (Interval, DateSpan) dateperiodexpr rdate = do many spacenonewline s <- periodexprdatespan rdate return (NoInterval, s) -- Parse a reporting interval. reportinginterval :: Parser Interval reportinginterval = choice' [ tryinterval "day" "daily" Days, tryinterval "week" "weekly" Weeks, tryinterval "month" "monthly" Months, tryinterval "quarter" "quarterly" Quarters, tryinterval "year" "yearly" Years, do string "biweekly" return $ Weeks 2, do string "bimonthly" return $ Months 2, do string "every" many spacenonewline n <- fmap read $ some digitChar thsuffix many spacenonewline string "day" many spacenonewline string "of" many spacenonewline string "week" return $ DayOfWeek n, do string "every" many spacenonewline n <- fmap read $ some digitChar thsuffix many spacenonewline string "day" optional $ do many spacenonewline string "of" many spacenonewline string "month" return $ DayOfMonth n ] where thsuffix = choice' $ map string ["st","nd","rd","th"] -- Parse any of several variants of a basic interval, eg "daily", "every day", "every N days". 
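-- For example (a minimal sketch, using T.pack so that OverloadedStrings is not needed):
--
-- >>> parsewith (tryinterval "week" "weekly" Weeks) (T.pack "weekly")
-- Right (Weeks 1)
-- >>> parsewith (tryinterval "week" "weekly" Weeks) (T.pack "every 2 weeks")
-- Right (Weeks 2)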
tryinterval :: String -> String -> (Int -> Interval) -> Parser Interval tryinterval singular compact intcons = choice' [ do string compact return $ intcons 1, do string "every" many spacenonewline string singular return $ intcons 1, do string "every" many spacenonewline n <- fmap read $ some digitChar many spacenonewline string plural return $ intcons n ] where plural = singular ++ "s" periodexprdatespan :: Day -> Parser DateSpan periodexprdatespan rdate = choice $ map try [ doubledatespan rdate, fromdatespan rdate, todatespan rdate, justdatespan rdate ] doubledatespan :: Day -> Parser DateSpan doubledatespan rdate = do optional (string "from" >> many spacenonewline) b <- smartdate many spacenonewline optional (choice [string "to", string "-"] >> many spacenonewline) e <- smartdate return $ DateSpan (Just $ fixSmartDate rdate b) (Just $ fixSmartDate rdate e) fromdatespan :: Day -> Parser DateSpan fromdatespan rdate = do b <- choice [ do string "from" >> many spacenonewline smartdate , do d <- smartdate string "-" return d ] return $ DateSpan (Just $ fixSmartDate rdate b) Nothing todatespan :: Day -> Parser DateSpan todatespan rdate = do choice [string "to", string "-"] >> many spacenonewline e <- smartdate return $ DateSpan Nothing (Just $ fixSmartDate rdate e) justdatespan :: Day -> Parser DateSpan justdatespan rdate = do optional (string "in" >> many spacenonewline) d <- smartdate return $ spanFromSmartDate rdate d -- | Make a datespan from two valid date strings parseable by parsedate -- (or raise an error). Eg: mkdatespan \"2011/1/1\" \"2011/12/31\". mkdatespan :: String -> String -> DateSpan mkdatespan b = DateSpan (Just $ parsedate b) . Just . parsedate nulldatespan :: DateSpan nulldatespan = DateSpan Nothing Nothing nulldate :: Day nulldate = fromGregorian 0 1 1 hledger-lib-1.2/Hledger/Data/Journal.hs0000644000000000000000000012716513066173044016131 0ustar0000000000000000{-# LANGUAGE Rank2Types #-} {-# LANGUAGE StandaloneDeriving, OverloadedStrings #-} {-| A 'Journal' is a set of transactions, plus optional related data. This is hledger's primary data object. It is usually parsed from a journal file or other data format (see "Hledger.Read"). 
-} module Hledger.Data.Journal ( -- * Parsing helpers addMarketPrice, addModifierTransaction, addPeriodicTransaction, addTransaction, journalApplyAliases, journalBalanceTransactions, journalApplyCommodityStyles, commodityStylesFromAmounts, journalConvertAmountsToCost, journalFinalise, -- * Filtering filterJournalTransactions, filterJournalPostings, filterJournalAmounts, filterTransactionAmounts, filterTransactionPostings, filterPostingAmount, -- * Querying journalAccountNames, journalAccountNamesUsed, -- journalAmountAndPriceCommodities, journalAmounts, overJournalAmounts, traverseJournalAmounts, -- journalCanonicalCommodities, journalDateSpan, journalDescriptions, journalFilePath, journalFilePaths, journalTransactionAt, journalNextTransaction, journalPrevTransaction, journalPostings, -- * Standard account types journalBalanceSheetAccountQuery, journalProfitAndLossAccountQuery, journalIncomeAccountQuery, journalExpenseAccountQuery, journalAssetAccountQuery, journalLiabilityAccountQuery, journalEquityAccountQuery, journalCashAccountQuery, -- * Misc canonicalStyleFrom, matchpats, nulljournal, journalCheckBalanceAssertions, journalNumberAndTieTransactions, journalUntieTransactions, -- * Tests samplejournal, tests_Hledger_Data_Journal, ) where import Control.Applicative (Const(..)) import Control.Arrow import Control.Monad import Control.Monad.Except import qualified Control.Monad.Reader as R import Control.Monad.ST import Data.Array.ST import Data.Functor.Identity (Identity(..)) import qualified Data.HashTable.ST.Cuckoo as HT import Data.List -- import Data.Map (findWithDefault) import Data.Maybe import Data.Monoid import Data.Ord import Data.Text (Text) import qualified Data.Text as T import Safe (headMay, headDef) import Data.Time.Calendar import Data.Tree import System.Time (ClockTime(TOD)) import Test.HUnit import Text.Printf import qualified Data.Map as M import Hledger.Utils import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount -- import Hledger.Data.Commodity import Hledger.Data.Dates import Hledger.Data.Transaction import Hledger.Data.Posting import Hledger.Query -- try to make Journal ppShow-compatible -- instance Show ClockTime where -- show t = "" -- deriving instance Show Journal instance Show Journal where show j | debugLevel < 3 = printf "Journal %s with %d transactions, %d accounts" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) | debugLevel < 6 = printf "Journal %s with %d transactions, %d accounts: %s" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) (show accounts) | otherwise = printf "Journal %s with %d transactions, %d accounts: %s, commodity styles: %s" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) (show accounts) (show $ jinferredcommodities j) -- ++ (show $ journalTransactions l) where accounts = filter (/= "root") $ flatten $ journalAccountNameTree j -- showJournalDebug j = unlines [ -- show j -- ,show (jtxns j) -- ,show (jmodifiertxns j) -- ,show (jperiodictxns j) -- ,show $ jparsetimeclockentries j -- ,show $ jmarketprices j -- ,show $ jfinalcommentlines j -- ,show $ jparsestate j -- ,show $ map fst $ jfiles j -- ] -- The monoid instance for Journal is useful for two situations. -- -- 1. concatenating finalised journals, eg with multiple -f options: -- FIRST <> SECOND. 
The second's list fields are appended to the -- first's, map fields are combined, transaction counts are summed, -- the parse state of the second is kept. -- -- 2. merging a child parsed journal, eg with the include directive: -- CHILD <> PARENT. A parsed journal's data is in reverse order, so -- this gives what we want. -- instance Monoid Journal where mempty = nulljournal mappend j1 j2 = Journal { jparsedefaultyear = jparsedefaultyear j2 ,jparsedefaultcommodity = jparsedefaultcommodity j2 ,jparseparentaccounts = jparseparentaccounts j2 ,jparsealiases = jparsealiases j2 -- ,jparsetransactioncount = jparsetransactioncount j1 + jparsetransactioncount j2 ,jparsetimeclockentries = jparsetimeclockentries j1 <> jparsetimeclockentries j2 ,jaccounts = jaccounts j1 <> jaccounts j2 ,jcommodities = jcommodities j1 <> jcommodities j2 ,jinferredcommodities = jinferredcommodities j1 <> jinferredcommodities j2 ,jmarketprices = jmarketprices j1 <> jmarketprices j2 ,jmodifiertxns = jmodifiertxns j1 <> jmodifiertxns j2 ,jperiodictxns = jperiodictxns j1 <> jperiodictxns j2 ,jtxns = jtxns j1 <> jtxns j2 ,jfinalcommentlines = jfinalcommentlines j2 ,jfiles = jfiles j1 <> jfiles j2 ,jlastreadtime = max (jlastreadtime j1) (jlastreadtime j2) } nulljournal :: Journal nulljournal = Journal { jparsedefaultyear = Nothing ,jparsedefaultcommodity = Nothing ,jparseparentaccounts = [] ,jparsealiases = [] -- ,jparsetransactioncount = 0 ,jparsetimeclockentries = [] ,jaccounts = [] ,jcommodities = M.fromList [] ,jinferredcommodities = M.fromList [] ,jmarketprices = [] ,jmodifiertxns = [] ,jperiodictxns = [] ,jtxns = [] ,jfinalcommentlines = "" ,jfiles = [] ,jlastreadtime = TOD 0 0 } journalFilePath :: Journal -> FilePath journalFilePath = fst . mainfile journalFilePaths :: Journal -> [FilePath] journalFilePaths = map fst . jfiles mainfile :: Journal -> (FilePath, Text) mainfile = headDef ("", "") . jfiles addTransaction :: Transaction -> Journal -> Journal addTransaction t j = j { jtxns = t : jtxns j } addModifierTransaction :: ModifierTransaction -> Journal -> Journal addModifierTransaction mt j = j { jmodifiertxns = mt : jmodifiertxns j } addPeriodicTransaction :: PeriodicTransaction -> Journal -> Journal addPeriodicTransaction pt j = j { jperiodictxns = pt : jperiodictxns j } addMarketPrice :: MarketPrice -> Journal -> Journal addMarketPrice h j = j { jmarketprices = h : jmarketprices j } -- | Get the transaction with this index (its 1-based position in the input stream), if any. journalTransactionAt :: Journal -> Integer -> Maybe Transaction journalTransactionAt Journal{jtxns=ts} i = -- it's probably ts !! (i+1), but we won't assume headMay [t | t <- ts, tindex t == i] -- | Get the transaction that appeared immediately after this one in the input stream, if any. journalNextTransaction :: Journal -> Transaction -> Maybe Transaction journalNextTransaction j t = journalTransactionAt j (tindex t + 1) -- | Get the transaction that appeared immediately before this one in the input stream, if any. journalPrevTransaction :: Journal -> Transaction -> Maybe Transaction journalPrevTransaction j t = journalTransactionAt j (tindex t - 1) -- | Unique transaction descriptions used in this journal. journalDescriptions :: Journal -> [Text] journalDescriptions = nub . sort . map tdescription . jtxns -- | All postings from this journal's transactions, in order. journalPostings :: Journal -> [Posting] journalPostings = concatMap tpostings . jtxns -- | Unique account names posted to in this journal. 
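-- For example (a minimal sketch, using the samplejournal defined at the end of this module):
--
-- >>> T.pack "assets:cash" `elem` journalAccountNamesUsed samplejournal
-- True
-- >>> T.pack "assets" `elem` journalAccountNamesUsed samplejournal
-- False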
journalAccountNamesUsed :: Journal -> [AccountName] journalAccountNamesUsed = sort . accountNamesFromPostings . journalPostings -- | Unique account names in this journal, including parent accounts containing no postings. journalAccountNames :: Journal -> [AccountName] journalAccountNames = sort . expandAccountNames . journalAccountNamesUsed journalAccountNameTree :: Journal -> Tree AccountName journalAccountNameTree = accountNameTreeFrom . journalAccountNames -- standard account types -- | A query for Profit & Loss accounts in this journal. -- Cf . journalProfitAndLossAccountQuery :: Journal -> Query journalProfitAndLossAccountQuery j = Or [journalIncomeAccountQuery j ,journalExpenseAccountQuery j ] -- | A query for Income (Revenue) accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^(income|revenue)s?(:|$)@. journalIncomeAccountQuery :: Journal -> Query journalIncomeAccountQuery _ = Acct "^(income|revenue)s?(:|$)" -- | A query for Expense accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^expenses?(:|$)@. journalExpenseAccountQuery :: Journal -> Query journalExpenseAccountQuery _ = Acct "^expenses?(:|$)" -- | A query for Asset, Liability & Equity accounts in this journal. -- Cf . journalBalanceSheetAccountQuery :: Journal -> Query journalBalanceSheetAccountQuery j = Or [journalAssetAccountQuery j ,journalLiabilityAccountQuery j ,journalEquityAccountQuery j ] -- | A query for Asset accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^assets?(:|$)@. journalAssetAccountQuery :: Journal -> Query journalAssetAccountQuery _ = Acct "^assets?(:|$)" -- | A query for Liability accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^(debts?|liabilit(y|ies))(:|$)@. journalLiabilityAccountQuery :: Journal -> Query journalLiabilityAccountQuery _ = Acct "^(debts?|liabilit(y|ies))(:|$)" -- | A query for Equity accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^equity(:|$)@. journalEquityAccountQuery :: Journal -> Query journalEquityAccountQuery _ = Acct "^equity(:|$)" -- | A query for Cash (-equivalent) accounts in this journal (ie, -- accounts which appear on the cashflow statement.) This is currently -- hard-coded to be all the Asset accounts except for those containing the -- case-insensitive regex @(receivable|A/R)@. journalCashAccountQuery :: Journal -> Query journalCashAccountQuery j = And [journalAssetAccountQuery j, Not $ Acct "(receivable|A/R)"] -- Various kinds of filtering on journals. We do it differently depending -- on the command. ------------------------------------------------------------------------------- -- filtering V2 -- | Keep only transactions matching the query expression. filterJournalTransactions :: Query -> Journal -> Journal filterJournalTransactions q j@Journal{jtxns=ts} = j{jtxns=filter (q `matchesTransaction`) ts} -- | Keep only postings matching the query expression. -- This can leave unbalanced transactions. filterJournalPostings :: Query -> Journal -> Journal filterJournalPostings q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionPostings q) ts} -- | Within each posting's amount, keep only the parts matching the query. -- This can leave unbalanced transactions. 
filterJournalAmounts :: Query -> Journal -> Journal filterJournalAmounts q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionAmounts q) ts} -- | Filter out all parts of this transaction's amounts which do not match the query. -- This can leave the transaction unbalanced. filterTransactionAmounts :: Query -> Transaction -> Transaction filterTransactionAmounts q t@Transaction{tpostings=ps} = t{tpostings=map (filterPostingAmount q) ps} -- | Filter out all parts of this posting's amount which do not match the query. filterPostingAmount :: Query -> Posting -> Posting filterPostingAmount q p@Posting{pamount=Mixed as} = p{pamount=Mixed $ filter (q `matchesAmount`) as} filterTransactionPostings :: Query -> Transaction -> Transaction filterTransactionPostings q t@Transaction{tpostings=ps} = t{tpostings=filter (q `matchesPosting`) ps} {- ------------------------------------------------------------------------------- -- filtering V1 -- | Keep only transactions we are interested in, as described by the -- filter specification. filterJournalTransactions :: FilterSpec -> Journal -> Journal filterJournalTransactions FilterSpec{datespan=datespan ,cleared=cleared -- ,real=real -- ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalTransactionsByClearedStatus cleared . filterJournalPostingsByDepth depth . filterJournalTransactionsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . filterJournalTransactionsByDate datespan -- | Keep only postings we are interested in, as described by the filter -- specification. This can leave unbalanced transactions. filterJournalPostings :: FilterSpec -> Journal -> Journal filterJournalPostings FilterSpec{datespan=datespan ,cleared=cleared ,real=real ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalPostingsByRealness real . filterJournalPostingsByClearedStatus cleared . filterJournalPostingsByEmpty empty . filterJournalPostingsByDepth depth . filterJournalPostingsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . filterJournalTransactionsByDate datespan -- | Keep only transactions whose metadata matches all metadata specifications. filterJournalTransactionsByMetadata :: [(String,String)] -> Journal -> Journal filterJournalTransactionsByMetadata pats j@Journal{jtxns=ts} = j{jtxns=filter matchmd ts} where matchmd t = all (`elem` tmetadata t) pats -- | Keep only transactions whose description matches the description patterns. filterJournalTransactionsByDescription :: [String] -> Journal -> Journal filterJournalTransactionsByDescription pats j@Journal{jtxns=ts} = j{jtxns=filter matchdesc ts} where matchdesc = matchpats pats . tdescription -- | Keep only transactions which fall between begin and end dates. -- We include transactions on the begin date and exclude transactions on the end -- date, like ledger. An empty date string means no restriction. filterJournalTransactionsByDate :: DateSpan -> Journal -> Journal filterJournalTransactionsByDate (DateSpan begin end) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match t = maybe True (tdate t>=) begin && maybe True (tdate t<) end -- | Keep only transactions which have the requested cleared/uncleared -- status, if there is one. 
filterJournalTransactionsByClearedStatus :: Maybe Bool -> Journal -> Journal filterJournalTransactionsByClearedStatus Nothing j = j filterJournalTransactionsByClearedStatus (Just val) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match = (==val).tstatus -- | Keep only postings which have the requested cleared/uncleared status, -- if there is one. filterJournalPostingsByClearedStatus :: Maybe Bool -> Journal -> Journal filterJournalPostingsByClearedStatus Nothing j = j filterJournalPostingsByClearedStatus (Just c) j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter ((==c) . postingCleared) ps} -- | Strip out any virtual postings, if the flag is true, otherwise do -- no filtering. filterJournalPostingsByRealness :: Bool -> Journal -> Journal filterJournalPostingsByRealness False j = j filterJournalPostingsByRealness True j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter isReal ps} -- | Strip out any postings with zero amount, unless the flag is true. filterJournalPostingsByEmpty :: Bool -> Journal -> Journal filterJournalPostingsByEmpty True j = j filterJournalPostingsByEmpty False j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (not . isEmptyPosting) ps} -- -- | Keep only transactions which affect accounts deeper than the specified depth. -- filterJournalTransactionsByDepth :: Maybe Int -> Journal -> Journal -- filterJournalTransactionsByDepth Nothing j = j -- filterJournalTransactionsByDepth (Just d) j@Journal{jtxns=ts} = -- j{jtxns=(filter (any ((<= d+1) . accountNameLevel . paccount) . tpostings) ts)} -- | Strip out any postings to accounts deeper than the specified depth -- (and any transactions which have no postings as a result). filterJournalPostingsByDepth :: Maybe Int -> Journal -> Journal filterJournalPostingsByDepth Nothing j = j filterJournalPostingsByDepth (Just d) j@Journal{jtxns=ts} = j{jtxns=filter (not . null . tpostings) $ map filtertxns ts} where filtertxns t@Transaction{tpostings=ps} = t{tpostings=filter ((<= d) . accountNameLevel . paccount) ps} -- | Keep only postings which affect accounts matched by the account patterns. -- This can leave transactions unbalanced. filterJournalPostingsByAccount :: [String] -> Journal -> Journal filterJournalPostingsByAccount apats j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (matchpats apats . paccount) ps} -- | Keep only transactions which affect accounts matched by the account patterns. -- More precisely: each positive account pattern excludes transactions -- which do not contain a posting to a matched account, and each negative -- account pattern excludes transactions containing a posting to a matched -- account. filterJournalTransactionsByAccount :: [String] -> Journal -> Journal filterJournalTransactionsByAccount apats j@Journal{jtxns=ts} = j{jtxns=filter tmatch ts} where tmatch t = (null positives || any positivepmatch ps) && (null negatives || not (any negativepmatch ps)) where ps = tpostings t positivepmatch p = any (`amatch` a) positives where a = paccount p negativepmatch p = any (`amatch` a) negatives where a = paccount p amatch pat a = regexMatchesCI (abspat pat) a (negatives,positives) = partition isnegativepat apats -} -- | Apply additional account aliases (eg from the command-line) to all postings in a journal. 
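-- For example, applying a basic alias that rewrites the "expenses" prefix
-- (a rough sketch; it assumes the 'BasicAlias' constructor of 'AccountAlias'
-- and Text account names):
--
-- journalApplyAliases [BasicAlias (T.pack "expenses") (T.pack "costs")] samplejournal
--
-- should leave postings to "costs:food" and "costs:supplies" instead of
-- "expenses:food" and "expenses:supplies".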
journalApplyAliases :: [AccountAlias] -> Journal -> Journal journalApplyAliases aliases j@Journal{jtxns=ts} = -- (if null aliases -- then id -- else (dbgtrace $ -- "applying additional command-line aliases:\n" -- ++ chomp (unlines $ map (" "++) $ lines $ ppShow aliases))) $ j{jtxns=map dotransaction ts} where dotransaction t@Transaction{tpostings=ps} = t{tpostings=map doposting ps} doposting p@Posting{paccount=a} = p{paccount= accountNameApplyAliases aliases a} -- | Do post-parse processing on a parsed journal to make it ready for -- use. Reverse parsed data to normal order, canonicalise amount -- formats, check/ensure that transactions are balanced, and maybe -- check balance assertions. journalFinalise :: ClockTime -> FilePath -> Text -> Bool -> ParsedJournal -> Either String Journal journalFinalise t path txt assrt j@Journal{jfiles=fs} = do (journalTieTransactions <$> (journalBalanceTransactions assrt $ journalApplyCommodityStyles $ j{ jfiles = (path,txt) : reverse fs , jlastreadtime = t , jtxns = reverse $ jtxns j -- NOTE: see addTransaction , jmodifiertxns = reverse $ jmodifiertxns j -- NOTE: see addModifierTransaction , jperiodictxns = reverse $ jperiodictxns j -- NOTE: see addPeriodicTransaction , jmarketprices = reverse $ jmarketprices j -- NOTE: see addMarketPrice })) journalNumberAndTieTransactions = journalTieTransactions . journalNumberTransactions -- | Number (set the tindex field) this journal's transactions, counting upward from 1. journalNumberTransactions :: Journal -> Journal journalNumberTransactions j@Journal{jtxns=ts} = j{jtxns=map (\(i,t) -> t{tindex=i}) $ zip [1..] ts} -- | Tie the knot in all of this journal's transactions, ensuring their postings -- refer to them. This should be done last, after any other transaction-modifying operations. journalTieTransactions :: Journal -> Journal journalTieTransactions j@Journal{jtxns=ts} = j{jtxns=map txnTieKnot ts} -- | Untie all transaction-posting knots in this journal, so that eg -- recursiveSize and GHCI's :sprint can work on it. journalUntieTransactions :: Transaction -> Transaction journalUntieTransactions t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Check any balance assertions in the journal and return an error -- message if any of them fail. journalCheckBalanceAssertions :: Journal -> Either String Journal journalCheckBalanceAssertions j = runST $ journalBalanceTransactionsST True j (return ()) (\_ _ -> return ()) (const $ return j) -- noops -- | Check a posting's balance assertion and return an error if it -- fails. checkBalanceAssertion :: Posting -> MixedAmount -> Either String () checkBalanceAssertion p@Posting{ pbalanceassertion = Just ass} amt | isReallyZeroAmount diff = Right () | True = Left err where assertedcomm = acommodity ass actualbal = fromMaybe nullamt $ find ((== assertedcomm) . 
acommodity) (amounts amt) diff = ass - actualbal diffplus | isNegativeAmount diff == False = "+" | otherwise = "" err = printf (unlines [ "balance assertion error%s", "after posting:", "%s", "balance assertion details:", "date: %s", "account: %s", "commodity: %s", "calculated: %s", "asserted: %s (difference: %s)" ]) (case ptransaction p of Nothing -> ":" -- shouldn't happen Just t -> printf " in %s:\nin transaction:\n%s" (showGenericSourcePos $ tsourcepos t) (chomp $ show t) :: String) (showPostingLine p) (showDate $ postingDate p) (T.unpack $ paccount p) -- XXX pack assertedcomm (showAmount actualbal) (showAmount ass) (diffplus ++ showAmount diff) checkBalanceAssertion _ _ = Right () -- | Environment for 'CurrentBalancesModifier' data Env s = Env { eBalances :: HT.HashTable s AccountName MixedAmount , eStoreTx :: Transaction -> ST s () , eAssrt :: Bool , eStyles :: Maybe (M.Map CommoditySymbol AmountStyle) } -- | Monad transformer stack with a reference to a mutable hashtable -- of current account balances and a mutable array of finished -- transactions in original parsing order. type CurrentBalancesModifier s = R.ReaderT (Env s) (ExceptT String (ST s)) -- | Fill in any missing amounts and check that all journal transactions -- balance, or return an error message. This is done after parsing all -- amounts and applying canonical commodity styles, since balancing -- depends on display precision. Reports only the first error encountered. journalBalanceTransactions :: Bool -> Journal -> Either String Journal journalBalanceTransactions assrt j = runST $ journalBalanceTransactionsST assrt (journalNumberTransactions j) (newArray_ (1, genericLength $ jtxns j) :: forall s. ST s (STArray s Integer Transaction)) (\arr tx -> writeArray arr (tindex tx) tx) $ fmap (\txns -> j{ jtxns = txns}) . getElems -- | Generalization used in the definition of -- 'journalBalanceTransactionsST' and 'journalCheckBalanceAssertions' journalBalanceTransactionsST :: Bool -> Journal -> ST s txns -- ^ creates transaction store -> (txns -> Transaction -> ST s ()) -- ^ "store" operation -> (txns -> ST s a) -- ^ calculate result from transactions -> ST s (Either String a) journalBalanceTransactionsST assrt j createStore storeIn extract = runExceptT $ do bals <- lift $ HT.newSized size txStore <- lift $ createStore flip R.runReaderT (Env bals (storeIn txStore) assrt $ Just $ jinferredcommodities j) $ do dated <- fmap snd . sortBy (comparing fst) . concat <$> mapM' discriminateByDate (jtxns j) mapM' checkInferAndRegisterAmounts dated lift $ extract txStore where size = genericLength $ journalPostings j -- | This converts a transaction into a list of objects whose dates -- have to be considered when checking balance assertions and handled -- by 'checkInferAndRegisterAmounts'. -- -- Transactions without balance assignments can be balanced and stored -- immediately, and their (possibly dated) postings are returned. -- -- Transactions with balance assignments are only supported if no -- posting has a 'pdate' value. Supported transactions will be -- returned unchanged, and balanced and stored later in 'checkInferAndRegisterAmounts'. discriminateByDate :: Transaction -> CurrentBalancesModifier s [(Day, Either Posting Transaction)] discriminateByDate tx | null (assignmentPostings tx) = do styles <- R.reader $ eStyles balanced <- lift $ ExceptT $ return $ balanceTransaction styles tx storeTransaction balanced return $ fmap (postingDate &&& (Left . removePrices)) $ tpostings $ balanced | True = do when (any (isJust . 
pdate) $ tpostings tx) $ throwError $ unlines $ ["Not supported: Transactions with balance assignments " ,"AND dated postings without amount:\n" , showTransaction tx] return [(tdate tx, Right $ tx { tpostings = removePrices <$> tpostings tx })] -- | This function takes different objects describing changes to -- account balances on a single day. It can handle either a single -- posting (from an already balanced transaction without assignments) -- or a whole transaction with assignments (which is required to have no -- posting with pdate set). -- -- For a single posting, there is not much to do. Only add its amount -- to its account and check the assertion, if there is one. This -- functionality is provided by 'addAmountAndCheckBalance'. -- -- For a whole transaction, it loops over all postings, and performs -- 'addAmountAndCheckBalance', if there is an amount. If there is no -- amount, the amount is inferred by the assertion or left empty if -- there is no assertion. Then, the transaction is balanced, the -- inferred amount added to the balance (all in -- 'balanceTransactionUpdate') and the resulting transaction with no -- missing amounts is stored in the array, for later retrieval. -- -- Again in short: -- -- 'Left Posting': Check the balance assertion and update the -- account balance. If the amount is empty, do nothing. This can be -- the case e.g. for virtual postings. -- -- 'Right Transaction': Loop over all postings, infer their amounts -- and then balance and store the transaction. checkInferAndRegisterAmounts :: Either Posting Transaction -> CurrentBalancesModifier s () checkInferAndRegisterAmounts (Left p) = void $ addAmountAndCheckBalance return p checkInferAndRegisterAmounts (Right oldTx) = do let ps = tpostings oldTx styles <- R.reader $ eStyles newPostings <- forM ps $ addAmountAndCheckBalance inferFromAssignment storeTransaction =<< balanceTransactionUpdate (fmap void . addToBalance) styles oldTx { tpostings = newPostings } where inferFromAssignment :: Posting -> CurrentBalancesModifier s Posting inferFromAssignment p = maybe (return p) (fmap (\a -> p { pamount = a, porigin = Just $ originalPosting p }) . setBalance (paccount p)) $ pbalanceassertion p -- | Adds a posting's amount to the posting's account balance and -- checks a possible balance assertion. If there is no amount, it runs -- the supplied fallback action. addAmountAndCheckBalance :: (Posting -> CurrentBalancesModifier s Posting) -- ^ action to execute, if posting has no amount -> Posting -> CurrentBalancesModifier s Posting addAmountAndCheckBalance _ p | hasAmount p = do newAmt <- addToBalance (paccount p) $ pamount p assrt <- R.reader eAssrt lift $ when assrt $ ExceptT $ return $ checkBalanceAssertion p newAmt return p addAmountAndCheckBalance fallback p = fallback p -- | Sets an account's balance to a given amount and returns the -- difference of the new and old amounts. setBalance :: AccountName -> Amount -> CurrentBalancesModifier s MixedAmount setBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do old <- HT.lookup bals acc let new = Mixed $ (amt :) $ maybe [] (filter ((/= acommodity amt) . acommodity) . 
amounts) old HT.insert bals acc new return $ maybe new (new -) old -- | Adds an amount to an account's balance and returns the resulting -- balance addToBalance :: AccountName -> MixedAmount -> CurrentBalancesModifier s MixedAmount addToBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do new <- maybe amt (+ amt) <$> HT.lookup bals acc HT.insert bals acc new return new -- | Stores a transaction in the transaction array in original parsing -- order. storeTransaction :: Transaction -> CurrentBalancesModifier s () storeTransaction tx = liftModifier $ ($tx) . eStoreTx -- | Helper function liftModifier :: (Env s -> ST s a) -> CurrentBalancesModifier s a liftModifier f = R.ask >>= lift . lift . f -- | Choose and apply a consistent display format to the posting -- amounts in each commodity. Each commodity's format is specified by -- a commodity format directive, or otherwise inferred from posting -- amounts as in hledger < 0.28. journalApplyCommodityStyles :: Journal -> Journal journalApplyCommodityStyles j@Journal{jtxns=ts, jmarketprices=mps} = j'' where j' = journalInferCommodityStyles j j'' = j'{jtxns=map fixtransaction ts, jmarketprices=map fixmarketprice mps} fixtransaction t@Transaction{tpostings=ps} = t{tpostings=map fixposting ps} fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} fixmarketprice mp@MarketPrice{mpamount=a} = mp{mpamount=fixamount a} fixmixedamount (Mixed as) = Mixed $ map fixamount as fixamount a@Amount{acommodity=c} = a{astyle=journalCommodityStyle j' c} -- | Get this journal's standard display style for the given -- commodity. That is the style defined by the last corresponding -- commodity format directive if any, otherwise the style inferred -- from the posting amounts (or in some cases, price amounts) in this -- commodity if any, otherwise the default style. journalCommodityStyle :: Journal -> CommoditySymbol -> AmountStyle journalCommodityStyle j c = headDef amountstyle{asprecision=2} $ catMaybes [ M.lookup c (jcommodities j) >>= cformat ,M.lookup c $ jinferredcommodities j ] -- | Infer a display format for each commodity based on the amounts parsed. -- "hledger... will use the format of the first posting amount in the -- commodity, and the highest precision of all posting amounts in the commodity." journalInferCommodityStyles :: Journal -> Journal journalInferCommodityStyles j = j{jinferredcommodities = commodityStylesFromAmounts $ dbg8 "journalChooseCommmodityStyles using amounts" $ journalAmounts j} -- | Given a list of amounts in parse order, build a map from their commodity names -- to standard commodity display formats. commodityStylesFromAmounts :: [Amount] -> M.Map CommoditySymbol AmountStyle commodityStylesFromAmounts amts = M.fromList commstyles where samecomm = \a1 a2 -> acommodity a1 == acommodity a2 commamts = [(acommodity $ head as, as) | as <- groupBy samecomm $ sortBy (comparing acommodity) amts] commstyles = [(c, canonicalStyleFrom $ map astyle as) | (c,as) <- commamts] -- | Given an ordered list of amount styles, choose a canonical style. -- That is: the style of the first, and the -- maximum precision of all. canonicalStyleFrom :: [AmountStyle] -> AmountStyle canonicalStyleFrom [] = amountstyle canonicalStyleFrom ss@(first:_) = first{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} where mgrps = maybe Nothing Just $ headMay $ catMaybes $ map asdigitgroups ss -- precision is maximum of all precisions prec = maximumStrict $ map asprecision ss mdec = Just $ headDef '.' 
$ catMaybes $ map asdecimalpoint ss -- precision is that of first amount with a decimal point -- (mdec, prec) = -- case filter (isJust . asdecimalpoint) ss of -- (s:_) -> (asdecimalpoint s, asprecision s) -- [] -> (Just '.', 0) -- -- | Apply this journal's historical price records to unpriced amounts where possible. -- journalApplyMarketPrices :: Journal -> Journal -- journalApplyMarketPrices j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts} -- where -- fixtransaction t@Transaction{tdate=d, tpostings=ps} = t{tpostings=map fixposting ps} -- where -- fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} -- fixmixedamount (Mixed as) = Mixed $ map fixamount as -- fixamount = fixprice -- fixprice a@Amount{price=Just _} = a -- fixprice a@Amount{commodity=c} = a{price=maybe Nothing (Just . UnitPrice) $ journalMarketPriceFor j d c} -- -- | Get the price for a commodity on the specified day from the price database, if known. -- -- Does only one lookup step, ie will not look up the price of a price. -- journalMarketPriceFor :: Journal -> Day -> CommoditySymbol -> Maybe MixedAmount -- journalMarketPriceFor j d CommoditySymbol{symbol=s} = do -- let ps = reverse $ filter ((<= d).mpdate) $ filter ((s==).hsymbol) $ sortBy (comparing mpdate) $ jmarketprices j -- case ps of (MarketPrice{mpamount=a}:_) -> Just a -- _ -> Nothing -- | Convert all this journal's amounts to cost by applying their prices, if any. journalConvertAmountsToCost :: Journal -> Journal journalConvertAmountsToCost j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts} where -- similar to journalApplyCommodityStyles fixtransaction t@Transaction{tpostings=ps} = t{tpostings=map fixposting ps} fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} fixmixedamount (Mixed as) = Mixed $ map fixamount as fixamount = canonicaliseAmount (jinferredcommodities j) . costOfAmount -- -- | Get this journal's unique, display-preference-canonicalised commodities, by symbol. -- journalCanonicalCommodities :: Journal -> M.Map String CommoditySymbol -- journalCanonicalCommodities j = canonicaliseCommodities $ journalAmountCommodities j -- -- | Get all this journal's amounts' commodities, in the order parsed. -- journalAmountCommodities :: Journal -> [CommoditySymbol] -- journalAmountCommodities = map acommodity . concatMap amounts . journalAmounts -- -- | Get all this journal's amount and price commodities, in the order parsed. -- journalAmountAndPriceCommodities :: Journal -> [CommoditySymbol] -- journalAmountAndPriceCommodities = concatMap amountCommodities . concatMap amounts . journalAmounts -- -- | Get this amount's commodity and any commodities referenced in its price. -- amountCommodities :: Amount -> [CommoditySymbol] -- amountCommodities Amount{acommodity=c,aprice=p} = -- case p of Nothing -> [c] -- Just (UnitPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- Just (TotalPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- | Get an ordered list of the amounts in this journal which will -- influence amount style canonicalisation. These are: -- -- * amounts in market price directives (in parse order) -- * amounts in postings (in parse order) -- -- Amounts in default commodity directives also influence -- canonicalisation, but earlier, as amounts are parsed. -- Amounts in posting prices are not used for canonicalisation. -- journalAmounts :: Journal -> [Amount] journalAmounts = getConst . traverseJournalAmounts (Const . 
(:[])) -- | Maps over all of the amounts in the journal. overJournalAmounts :: (Amount -> Amount) -> Journal -> Journal overJournalAmounts f = runIdentity . traverseJournalAmounts (Identity . f) -- | Traverses over all of the amounts in the journal, in the order -- indicated by 'journalAmounts'. traverseJournalAmounts :: Applicative f => (Amount -> f Amount) -> Journal -> f Journal traverseJournalAmounts f j = recombine <$> (traverse . mpa) f (jmarketprices j) <*> (traverse . tp . traverse . pamt . maa . traverse) f (jtxns j) where recombine mps txns = j { jmarketprices = mps, jtxns = txns } -- a bunch of traversals mpa g mp = (\amt -> mp { mpamount = amt }) <$> g (mpamount mp) tp g t = (\ps -> t { tpostings = ps }) <$> g (tpostings t) pamt g p = (\amt -> p { pamount = amt }) <$> g (pamount p) maa g (Mixed as) = Mixed <$> g as -- | The fully specified date span enclosing the dates (primary or secondary) -- of all this journal's transactions and postings, or DateSpan Nothing Nothing -- if there are none. journalDateSpan :: Bool -> Journal -> DateSpan journalDateSpan secondary j | null ts = DateSpan Nothing Nothing | otherwise = DateSpan (Just earliest) (Just $ addDays 1 latest) where earliest = minimumStrict dates latest = maximumStrict dates dates = pdates ++ tdates tdates = map (if secondary then transactionDate2 else tdate) ts pdates = concatMap (catMaybes . map (if secondary then (Just . postingDate2) else pdate) . tpostings) ts ts = jtxns j -- #ifdef TESTS test_journalDateSpan = do "journalDateSpan" ~: do assertEqual "" (DateSpan (Just $ fromGregorian 2014 1 10) (Just $ fromGregorian 2014 10 11)) (journalDateSpan True j) where j = nulljournal{jtxns = [nulltransaction{tdate = parsedate "2014/02/01" ,tpostings = [posting{pdate=Just (parsedate "2014/01/10")}] } ,nulltransaction{tdate = parsedate "2014/09/01" ,tpostings = [posting{pdate2=Just (parsedate "2014/10/10")}] } ]} -- #endif -- Misc helpers -- | Check if a set of hledger account/description filter patterns matches the -- given account name or entry description. Patterns are case-insensitive -- regular expressions. Prefixed with not:, they become anti-patterns. matchpats :: [String] -> String -> Bool matchpats pats str = (null positives || any match positives) && (null negatives || not (any match negatives)) where (negatives,positives) = partition isnegativepat pats match "" = True match pat = regexMatchesCI (abspat pat) str negateprefix = "not:" isnegativepat = (negateprefix `isPrefixOf`) abspat pat = if isnegativepat pat then drop (length negateprefix) pat else pat -- debug helpers -- traceAmountPrecision a = trace (show $ map (precision . acommodity) $ amounts a) a -- tracePostingsCommodities ps = trace (show $ map ((map (precision . acommodity) . amounts) . 
pamount) ps) ps -- tests -- A sample journal for testing, similar to examples/sample.journal: -- -- 2008/01/01 income -- assets:bank:checking $1 -- income:salary -- -- 2008/06/01 gift -- assets:bank:checking $1 -- income:gifts -- -- 2008/06/02 save -- assets:bank:saving $1 -- assets:bank:checking -- -- 2008/06/03 * eat & shop -- expenses:food $1 -- expenses:supplies $1 -- assets:cash -- -- 2008/12/31 * pay off -- liabilities:debts $1 -- assets:bank:checking -- Right samplejournal = journalBalanceTransactions False $ nulljournal {jtxns = [ txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/01/01", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:salary" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/01", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="gift", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:gifts" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/02", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="save", tcomment="", ttags=[], tpostings= ["assets:bank:saving" `post` usd 1 ,"assets:bank:checking" `post` usd (-1) ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/03", tdate2=Nothing, tstatus=Cleared, tcode="", tdescription="eat & shop", tcomment="", ttags=[], tpostings=["expenses:food" `post` usd 1 ,"expenses:supplies" `post` usd 1 ,"assets:cash" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/12/31", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="pay off", tcomment="", ttags=[], tpostings=["liabilities:debts" `post` usd 1 ,"assets:bank:checking" `post` usd (-1) ], tpreceding_comment_lines="" } ] } tests_Hledger_Data_Journal = TestList $ [ test_journalDateSpan -- "query standard account types" ~: -- do -- let j = journal1 -- journalBalanceSheetAccountNames j `is` ["assets","assets:a","equity","equity:q","equity:q:qq","liabilities","liabilities:l"] -- journalProfitAndLossAccountNames j `is` ["expenses","expenses:e","income","income:i"] ] hledger-lib-1.2/Hledger/Data/Ledger.hs0000644000000000000000000000725013035210046015677 0ustar0000000000000000{-| A 'Ledger' is derived from a 'Journal' by applying a filter specification to select 'Transaction's and 'Posting's of interest. It contains the filtered journal and knows the resulting chart of accounts, account balances, and postings in each account. 
-} module Hledger.Data.Ledger where import qualified Data.Map as M -- import Data.Text (Text) import qualified Data.Text as T import Safe (headDef) import Test.HUnit import Text.Printf import Hledger.Data.Types import Hledger.Data.Account import Hledger.Data.Journal import Hledger.Data.Posting import Hledger.Query instance Show Ledger where show l = printf "Ledger with %d transactions, %d accounts\n" --"%s" (length (jtxns $ ljournal l) + length (jmodifiertxns $ ljournal l) + length (jperiodictxns $ ljournal l)) (length $ ledgerAccountNames l) -- (showtree $ ledgerAccountNameTree l) nullledger :: Ledger nullledger = Ledger { ljournal = nulljournal, laccounts = [] } -- | Filter a journal's transactions with the given query, then derive -- a ledger containing the chart of accounts and balances. If the -- query includes a depth limit, that will affect the ledger's -- journal but not the ledger's account tree. ledgerFromJournal :: Query -> Journal -> Ledger ledgerFromJournal q j = nullledger{ljournal=j'', laccounts=as} where (q',depthq) = (filterQuery (not . queryIsDepth) q, filterQuery queryIsDepth q) j' = filterJournalAmounts (filterQuery queryIsSym q) $ -- remove amount parts which the query's sym: terms would exclude filterJournalPostings q' j as = accountsFromPostings $ journalPostings j' j'' = filterJournalPostings depthq j' -- | List a ledger's account names. ledgerAccountNames :: Ledger -> [AccountName] ledgerAccountNames = drop 1 . map aname . laccounts -- | Get the named account from a ledger. ledgerAccount :: Ledger -> AccountName -> Maybe Account ledgerAccount l a = lookupAccount a $ laccounts l -- | Get this ledger's root account, which is a dummy "root" account -- above all others. This should always be first in the account list, -- if somehow not this returns a null account. ledgerRootAccount :: Ledger -> Account ledgerRootAccount = headDef nullacct . laccounts -- | List a ledger's top-level accounts (the ones below the root), in tree order. ledgerTopAccounts :: Ledger -> [Account] ledgerTopAccounts = asubs . head . laccounts -- | List a ledger's bottom-level (subaccount-less) accounts, in tree order. ledgerLeafAccounts :: Ledger -> [Account] ledgerLeafAccounts = filter (null.asubs) . laccounts -- | Accounts in ledger whose name matches the pattern, in tree order. ledgerAccountsMatching :: [String] -> Ledger -> [Account] ledgerAccountsMatching pats = filter (matchpats pats . T.unpack . aname) . laccounts -- XXX pack -- | List a ledger's postings, in the order parsed. ledgerPostings :: Ledger -> [Posting] ledgerPostings = journalPostings . ljournal -- | The (fully specified) date span containing all the ledger's (filtered) transactions, -- or DateSpan Nothing Nothing if there are none. ledgerDateSpan :: Ledger -> DateSpan ledgerDateSpan = postingsDateSpan . ledgerPostings -- | All commodities used in this ledger. ledgerCommodities :: Ledger -> [CommoditySymbol] ledgerCommodities = M.keys . jinferredcommodities . ljournal tests_ledgerFromJournal = [ "ledgerFromJournal" ~: do assertEqual "" (0) (length $ ledgerPostings $ ledgerFromJournal Any nulljournal) assertEqual "" (11) (length $ ledgerPostings $ ledgerFromJournal Any samplejournal) assertEqual "" (6) (length $ ledgerPostings $ ledgerFromJournal (Depth 2) samplejournal) ] tests_Hledger_Data_Ledger = TestList $ tests_ledgerFromJournal hledger-lib-1.2/Hledger/Data/MarketPrice.hs0000644000000000000000000000160313066173044016711 0ustar0000000000000000{-| A 'MarketPrice' represents a historical exchange rate between two commodities. 
(Ledger calls them historical prices.) For example, prices published by a stock exchange or the foreign exchange market. Some commands (balance, currently) can use this information to show the market value of things at a given date. -} {-# LANGUAGE OverloadedStrings, LambdaCase #-} module Hledger.Data.MarketPrice where import qualified Data.Text as T import Test.HUnit import Hledger.Data.Amount import Hledger.Data.Dates import Hledger.Data.Types -- | Get the string representation of an market price, based on its -- commodity's display settings. showMarketPrice :: MarketPrice -> String showMarketPrice mp = unwords [ "P" , showDate (mpdate mp) , T.unpack (mpcommodity mp) , (showAmount . setAmountPrecision maxprecision) (mpamount mp) ] tests_Hledger_Data_MarketPrice = TestList [] hledger-lib-1.2/Hledger/Data/Period.hs0000644000000000000000000002756013035510426015732 0ustar0000000000000000{-| Manipulate the time periods typically used for reports with Period, a richer abstraction than DateSpan. See also Types and Dates. -} module Hledger.Data.Period where import Data.Time.Calendar import Data.Time.Calendar.MonthDay import Data.Time.Calendar.OrdinalDate import Data.Time.Calendar.WeekDate import Data.Time.Format import Text.Printf import Hledger.Data.Types -- | Convert Periods to DateSpans. -- -- >>> periodAsDateSpan (MonthPeriod 2000 1) == DateSpan (Just $ fromGregorian 2000 1 1) (Just $ fromGregorian 2000 2 1) -- True periodAsDateSpan :: Period -> DateSpan periodAsDateSpan (DayPeriod d) = DateSpan (Just d) (Just $ addDays 1 d) periodAsDateSpan (WeekPeriod b) = DateSpan (Just b) (Just $ addDays 7 b) periodAsDateSpan (MonthPeriod y m) = DateSpan (Just $ fromGregorian y m 1) (Just $ fromGregorian y' m' 1) where (y',m') | m==12 = (y+1,1) | otherwise = (y,m+1) periodAsDateSpan (QuarterPeriod y q) = DateSpan (Just $ fromGregorian y m 1) (Just $ fromGregorian y' m' 1) where (y', q') | q==4 = (y+1,1) | otherwise = (y,q+1) quarterAsMonth q = (q-1) * 3 + 1 m = quarterAsMonth q m' = quarterAsMonth q' periodAsDateSpan (YearPeriod y) = DateSpan (Just $ fromGregorian y 1 1) (Just $ fromGregorian (y+1) 1 1) periodAsDateSpan (PeriodBetween b e) = DateSpan (Just b) (Just e) periodAsDateSpan (PeriodFrom b) = DateSpan (Just b) Nothing periodAsDateSpan (PeriodTo e) = DateSpan Nothing (Just e) periodAsDateSpan (PeriodAll) = DateSpan Nothing Nothing -- | Convert DateSpans to Periods. -- -- >>> dateSpanAsPeriod $ DateSpan (Just $ fromGregorian 2000 1 1) (Just $ fromGregorian 2000 2 1) -- MonthPeriod 2000 1 dateSpanAsPeriod :: DateSpan -> Period dateSpanAsPeriod (DateSpan (Just b) (Just e)) = simplifyPeriod $ PeriodBetween b e dateSpanAsPeriod (DateSpan (Just b) Nothing) = PeriodFrom b dateSpanAsPeriod (DateSpan Nothing (Just e)) = PeriodTo e dateSpanAsPeriod (DateSpan Nothing Nothing) = PeriodAll -- | Convert PeriodBetweens to a more abstract period where possible. 
-- -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 1 1 1) (fromGregorian 2 1 1) -- YearPeriod 1 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 10 1) (fromGregorian 2001 1 1) -- QuarterPeriod 2000 4 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 1) (fromGregorian 2000 3 1) -- MonthPeriod 2000 2 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2016 7 25) (fromGregorian 2016 8 1) -- WeekPeriod 2016-07-25 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 1 1) (fromGregorian 2000 1 2) -- DayPeriod 2000-01-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 28) (fromGregorian 2000 3 1) -- PeriodBetween 2000-02-28 2000-03-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 29) (fromGregorian 2000 3 1) -- DayPeriod 2000-02-29 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 12 31) (fromGregorian 2001 1 1) -- DayPeriod 2000-12-31 -- simplifyPeriod :: Period -> Period simplifyPeriod (PeriodBetween b e) = case (toGregorian b, toGregorian e) of -- a year ((by,1,1), (ey,1,1)) | by+1==ey -> YearPeriod by -- a half-year -- ((by,1,1), (ey,7,1)) | by==ey -> -- ((by,7,1), (ey,1,1)) | by+1==ey -> -- a quarter ((by,1,1), (ey,4,1)) | by==ey -> QuarterPeriod by 1 ((by,4,1), (ey,7,1)) | by==ey -> QuarterPeriod by 2 ((by,7,1), (ey,10,1)) | by==ey -> QuarterPeriod by 3 ((by,10,1), (ey,1,1)) | by+1==ey -> QuarterPeriod by 4 -- a month ((by,bm,1), (ey,em,1)) | by==ey && bm+1==em -> MonthPeriod by bm ((by,12,1), (ey,1,1)) | by+1==ey -> MonthPeriod by 12 -- a week (two successive mondays), -- YYYYwN ("week N of year YYYY") -- _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate from, toWeekDate to) in by==ey && fw+1==tw && bd==1 && ed==1 -> -- a week starting on a monday _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate b, toWeekDate (addDays (-1) e)) in by==ey && bw==ew && bd==1 && ed==7 -> WeekPeriod b -- a day ((by,bm,bd), (ey,em,ed)) | (by==ey && bm==em && bd+1==ed) || (by+1==ey && bm==12 && em==1 && bd==31 && ed==1) || -- crossing a year boundary (by==ey && bm+1==em && isLastDayOfMonth by bm bd && ed==1) -- crossing a month boundary -> DayPeriod b _ -> PeriodBetween b e simplifyPeriod p = p isLastDayOfMonth y m d = case m of 1 -> d==31 2 | isLeapYear y -> d==29 | otherwise -> d==28 3 -> d==31 4 -> d==30 5 -> d==31 6 -> d==30 7 -> d==31 8 -> d==31 9 -> d==30 10 -> d==31 11 -> d==30 12 -> d==31 _ -> False -- | Is this period a "standard" period, referencing a particular day, week, month, quarter, or year ? -- Periods of other durations, or infinite duration, or not starting on a standard period boundary, are not. isStandardPeriod = isStandardPeriod' . simplifyPeriod where isStandardPeriod' (DayPeriod _) = True isStandardPeriod' (WeekPeriod _) = True isStandardPeriod' (MonthPeriod _ _) = True isStandardPeriod' (QuarterPeriod _ _) = True isStandardPeriod' (YearPeriod _) = True isStandardPeriod' _ = False -- | Render a period as a compact display string suitable for user output. 
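-- Illustrative examples (not from the original source) of isStandardPeriod
-- above: a span covering exactly one calendar month simplifies to a standard
-- period, while an open-ended span does not.
--
-- >>> isStandardPeriod (PeriodBetween (fromGregorian 2000 1 1) (fromGregorian 2000 2 1))
-- True
-- >>> isStandardPeriod (PeriodFrom (fromGregorian 2000 1 1))
-- False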
-- -- >>> showPeriod (WeekPeriod (fromGregorian 2016 7 25)) -- "2016/07/25w30" showPeriod (DayPeriod b) = formatTime defaultTimeLocale "%0C%y/%m/%d" b -- DATE showPeriod (WeekPeriod b) = formatTime defaultTimeLocale "%0C%y/%m/%dw%V" b -- STARTDATEwYEARWEEK showPeriod (MonthPeriod y m) = printf "%04d/%02d" y m -- YYYY/MM showPeriod (QuarterPeriod y q) = printf "%04dq%d" y q -- YYYYqN showPeriod (YearPeriod y) = printf "%04d" y -- YYYY showPeriod (PeriodBetween b e) = formatTime defaultTimeLocale "%0C%y/%m/%d" b ++ formatTime defaultTimeLocale "-%0C%y/%m/%d" (addDays (-1) e) -- STARTDATE-INCLUSIVEENDDATE showPeriod (PeriodFrom b) = formatTime defaultTimeLocale "%0C%y/%m/%d-" b -- STARTDATE- showPeriod (PeriodTo e) = formatTime defaultTimeLocale "-%0C%y/%m/%d" (addDays (-1) e) -- -INCLUSIVEENDDATE showPeriod PeriodAll = "-" periodStart :: Period -> Maybe Day periodStart p = mb where DateSpan mb _ = periodAsDateSpan p periodEnd :: Period -> Maybe Day periodEnd p = me where DateSpan _ me = periodAsDateSpan p -- | Move a standard period to the following period of same duration. -- Non-standard periods are unaffected. periodNext :: Period -> Period periodNext (DayPeriod b) = DayPeriod (addDays 1 b) periodNext (WeekPeriod b) = WeekPeriod (addDays 7 b) periodNext (MonthPeriod y 12) = MonthPeriod (y+1) 1 periodNext (MonthPeriod y m) = MonthPeriod y (m+1) periodNext (QuarterPeriod y 4) = QuarterPeriod (y+1) 1 periodNext (QuarterPeriod y q) = QuarterPeriod y (q+1) periodNext (YearPeriod y) = YearPeriod (y+1) periodNext p = p -- | Move a standard period to the preceding period of same duration. -- Non-standard periods are unaffected. periodPrevious :: Period -> Period periodPrevious (DayPeriod b) = DayPeriod (addDays (-1) b) periodPrevious (WeekPeriod b) = WeekPeriod (addDays (-7) b) periodPrevious (MonthPeriod y 1) = MonthPeriod (y-1) 12 periodPrevious (MonthPeriod y m) = MonthPeriod y (m-1) periodPrevious (QuarterPeriod y 1) = QuarterPeriod (y-1) 4 periodPrevious (QuarterPeriod y q) = QuarterPeriod y (q-1) periodPrevious (YearPeriod y) = YearPeriod (y-1) periodPrevious p = p -- | Move a standard period to the following period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodNextIn :: DateSpan -> Period -> Period periodNextIn (DateSpan _ (Just e)) p = case mb of Just b -> if b < e then p' else p _ -> p where p' = periodNext p mb = periodStart p' periodNextIn _ p = periodNext p -- | Move a standard period to the preceding period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodPreviousIn :: DateSpan -> Period -> Period periodPreviousIn (DateSpan (Just b) _) p = case me of Just e -> if e > b then p' else p _ -> p where p' = periodPrevious p me = periodEnd p' periodPreviousIn _ p = periodPrevious p -- | Move a standard period stepwise so that it encloses the given date. -- Non-standard periods are unaffected. periodMoveTo :: Day -> Period -> Period periodMoveTo d (DayPeriod _) = DayPeriod d periodMoveTo d (WeekPeriod _) = WeekPeriod $ mondayBefore d periodMoveTo d (MonthPeriod _ _) = MonthPeriod y m where (y,m,_) = toGregorian d periodMoveTo d (QuarterPeriod _ _) = QuarterPeriod y q where (y,m,_) = toGregorian d q = quarterContainingMonth m periodMoveTo d (YearPeriod _) = YearPeriod y where (y,_,_) = toGregorian d periodMoveTo _ p = p -- | Enlarge a standard period to the next larger enclosing standard period, if there is one. -- Eg, a day becomes the enclosing week. 
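-- Illustrative examples (not from the original source) of the period stepping
-- helpers above: periodNext/periodPrevious roll over year boundaries, and
-- periodMoveTo re-anchors a period of the same duration at the given date.
--
-- >>> periodNext (MonthPeriod 2000 12)
-- MonthPeriod 2001 1
-- >>> periodPrevious (QuarterPeriod 2000 1)
-- QuarterPeriod 1999 4
-- >>> showPeriod (periodMoveTo (fromGregorian 2017 3 15) (MonthPeriod 2000 1))
-- "2017/03"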
-- A week becomes whichever month the week's thursday falls into. -- A year becomes all (unlimited). -- Non-standard periods (arbitrary dates, or open-ended) are unaffected. periodGrow :: Period -> Period periodGrow (DayPeriod b) = WeekPeriod $ mondayBefore b periodGrow (WeekPeriod b) = MonthPeriod y m where (y,m) = yearMonthContainingWeekStarting b periodGrow (MonthPeriod y m) = QuarterPeriod y (quarterContainingMonth m) periodGrow (QuarterPeriod y _) = YearPeriod y periodGrow (YearPeriod _) = PeriodAll periodGrow p = p -- | Shrink a period to the next smaller standard period inside it, -- choosing the subperiod which contains today's date if possible, -- otherwise the first subperiod. It goes like this: -- unbounded periods and nonstandard periods (between two arbitrary dates) -> -- current year -> -- current quarter if it's in selected year, otherwise first quarter of selected year -> -- current month if it's in selected quarter, otherwise first month of selected quarter -> -- current week if it's in selected month, otherwise first week of selected month -> -- today if it's in selected week, otherwise first day of selected week, -- unless that's in previous month, in which case first day of month containing selected week. -- Shrinking a day has no effect. periodShrink :: Day -> Period -> Period periodShrink _ p@(DayPeriod _) = p periodShrink today (WeekPeriod b) | today >= b && diffDays today b < 7 = DayPeriod today | m /= weekmonth = DayPeriod $ fromGregorian weekyear weekmonth 1 | otherwise = DayPeriod b where (_,m,_) = toGregorian b (weekyear,weekmonth) = yearMonthContainingWeekStarting b periodShrink today (MonthPeriod y m) | (y',m') == (y,m) = WeekPeriod $ mondayBefore today | otherwise = WeekPeriod $ startOfFirstWeekInMonth y m where (y',m',_) = toGregorian today periodShrink today (QuarterPeriod y q) | quarterContainingMonth thismonth == q = MonthPeriod y thismonth | otherwise = MonthPeriod y (firstMonthOfQuarter q) where (_,thismonth,_) = toGregorian today periodShrink today (YearPeriod y) | y == thisyear = QuarterPeriod y thisquarter | otherwise = QuarterPeriod y 1 where (thisyear,thismonth,_) = toGregorian today thisquarter = quarterContainingMonth thismonth periodShrink today _ = YearPeriod y where (y,_,_) = toGregorian today mondayBefore d = addDays (fromIntegral (1 - wd)) d where (_,_,wd) = toWeekDate d yearMonthContainingWeekStarting weekstart = (y,m) where thu = addDays 3 weekstart (y,yd) = toOrdinalDate thu (m,_) = dayOfYearToMonthAndDay (isLeapYear y) yd quarterContainingMonth m = (m-1) `div` 3 + 1 firstMonthOfQuarter q = (q-1)*3 + 1 startOfFirstWeekInMonth y m | monthstartday <= 4 = mon | otherwise = addDays 7 mon -- month starts with a fri/sat/sun where monthstart = fromGregorian y m 1 mon = mondayBefore monthstart (_,_,monthstartday) = toWeekDate monthstart hledger-lib-1.2/Hledger/Data/StringFormat.hs0000644000000000000000000002131313035210046017110 0ustar0000000000000000-- | Parse format strings provided by --format, with awareness of -- hledger's report item fields. The formats are used by -- report-specific renderers like renderBalanceReportItem. {-# LANGUAGE FlexibleContexts, TypeFamilies #-} module Hledger.Data.StringFormat ( parseStringFormat , defaultStringFormatStyle , StringFormat(..) , StringFormatComponent(..) , ReportItemField(..) 
, tests ) where import Prelude () import Prelude.Compat import Numeric import Data.Char (isPrint) import Data.Maybe import Test.HUnit import Text.Megaparsec import Text.Megaparsec.String import Hledger.Utils.String (formatString) -- | A format specification/template to use when rendering a report line item as text. -- -- A format is a sequence of components; each is either a literal -- string, or a hledger report item field with specified width and -- justification whose value will be interpolated at render time. -- -- A component's value may be a multi-line string (or a -- multi-commodity amount), in which case the final string will be -- either single-line or a top or bottom-aligned multi-line string -- depending on the StringFormat variant used. -- -- Currently this is only used in the balance command's single-column -- mode, which provides a limited StringFormat renderer. -- data StringFormat = OneLine [StringFormatComponent] -- ^ multi-line values will be rendered on one line, comma-separated | TopAligned [StringFormatComponent] -- ^ values will be top-aligned (and bottom-padded to the same height) | BottomAligned [StringFormatComponent] -- ^ values will be bottom-aligned (and top-padded) deriving (Show, Eq) data StringFormatComponent = FormatLiteral String -- ^ Literal text to be rendered as-is | FormatField Bool (Maybe Int) (Maybe Int) ReportItemField -- ^ A data field to be formatted and interpolated. Parameters: -- -- - Left justify ? Right justified if false -- - Minimum width ? Will be space-padded if narrower than this -- - Maximum width ? Will be clipped if wider than this -- - Which of the standard hledger report item fields to interpolate deriving (Show, Eq) -- | An id identifying which report item field to interpolate. These -- are drawn from several hledger report types, so are not all -- applicable for a given report. data ReportItemField = AccountField -- ^ A posting or balance report item's account name | DefaultDateField -- ^ A posting or register or entry report item's date | DescriptionField -- ^ A posting or register or entry report item's description | TotalField -- ^ A balance or posting report item's balance or running total. -- Always rendered right-justified. | DepthSpacerField -- ^ A balance report item's indent level (which may be different from the account name depth). -- Rendered as this number of spaces, multiplied by the minimum width spec if any. | FieldNo Int -- ^ A report item's nth field. May be unimplemented. deriving (Show, Eq) ---------------------------------------------------------------------- -- renderStringFormat :: StringFormat -> Map String String -> String -- renderStringFormat fmt params = ---------------------------------------------------------------------- -- | Parse a string format specification, or return a parse error. 
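-- A minimal sketch (hypothetical helper, not part of the original module):
-- parse a --format option value with parseStringFormat below, falling back to
-- a simple "account then total" layout if it doesn't parse.
parseStringFormatOrDefault :: String -> StringFormat
parseStringFormatOrDefault s = either (const fallback) id (parseStringFormat s)
  where
    fallback = defaultStringFormatStyle
      [ FormatField True (Just 20) Nothing AccountField
      , FormatLiteral "  "
      , FormatField False (Just 12) Nothing TotalField
      ]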
parseStringFormat :: String -> Either String StringFormat parseStringFormat input = case (runParser (stringformatp <* eof) "(unknown)") input of Left y -> Left $ show y Right x -> Right x defaultStringFormatStyle = BottomAligned stringformatp :: Parser StringFormat stringformatp = do alignspec <- optional (try $ char '%' >> oneOf "^_,") let constructor = case alignspec of Just '^' -> TopAligned Just '_' -> BottomAligned Just ',' -> OneLine _ -> defaultStringFormatStyle constructor <$> many componentp componentp :: Parser StringFormatComponent componentp = formatliteralp <|> formatfieldp formatliteralp :: Parser StringFormatComponent formatliteralp = do s <- some c return $ FormatLiteral s where isPrintableButNotPercentage x = isPrint x && (not $ x == '%') c = (satisfy isPrintableButNotPercentage "printable character") <|> try (string "%%" >> return '%') formatfieldp :: Parser StringFormatComponent formatfieldp = do char '%' leftJustified <- optional (char '-') minWidth <- optional (some $ digitChar) maxWidth <- optional (do char '.'; some $ digitChar) -- TODO: Can this be (char '1') *> (some digitChar) char '(' f <- fieldp char ')' return $ FormatField (isJust leftJustified) (parseDec minWidth) (parseDec maxWidth) f where parseDec s = case s of Just text -> Just m where ((m,_):_) = readDec text _ -> Nothing fieldp :: Parser ReportItemField fieldp = do try (string "account" >> return AccountField) <|> try (string "depth_spacer" >> return DepthSpacerField) <|> try (string "date" >> return DescriptionField) <|> try (string "description" >> return DescriptionField) <|> try (string "total" >> return TotalField) <|> try (some digitChar >>= (\s -> return $ FieldNo $ read s)) ---------------------------------------------------------------------- testFormat :: StringFormatComponent -> String -> String -> Assertion testFormat fs value expected = assertEqual name expected actual where (name, actual) = case fs of FormatLiteral l -> ("literal", formatString False Nothing Nothing l) FormatField leftJustify min max _ -> ("field", formatString leftJustify min max value) testParser :: String -> StringFormat -> Assertion testParser s expected = case (parseStringFormat s) of Left error -> assertFailure $ show error Right actual -> assertEqual ("Input: " ++ s) expected actual tests = test [ formattingTests ++ parserTests ] formattingTests = [ testFormat (FormatLiteral " ") "" " " , testFormat (FormatField False Nothing Nothing DescriptionField) "description" "description" , testFormat (FormatField False (Just 20) Nothing DescriptionField) "description" " description" , testFormat (FormatField False Nothing (Just 20) DescriptionField) "description" "description" , testFormat (FormatField True Nothing (Just 20) DescriptionField) "description" "description" , testFormat (FormatField True (Just 20) Nothing DescriptionField) "description" "description " , testFormat (FormatField True (Just 20) (Just 20) DescriptionField) "description" "description " , testFormat (FormatField True Nothing (Just 3) DescriptionField) "description" "des" ] parserTests = [ testParser "" (defaultStringFormatStyle []) , testParser "D" (defaultStringFormatStyle [FormatLiteral "D"]) , testParser "%(date)" (defaultStringFormatStyle [FormatField False Nothing Nothing DescriptionField]) , testParser "%(total)" (defaultStringFormatStyle [FormatField False Nothing Nothing TotalField]) , testParser "^%(total)" (TopAligned [FormatField False Nothing Nothing TotalField]) , testParser "_%(total)" (BottomAligned [FormatField False Nothing Nothing 
TotalField]) , testParser ",%(total)" (OneLine [FormatField False Nothing Nothing TotalField]) , testParser "Hello %(date)!" (defaultStringFormatStyle [FormatLiteral "Hello ", FormatField False Nothing Nothing DescriptionField, FormatLiteral "!"]) , testParser "%-(date)" (defaultStringFormatStyle [FormatField True Nothing Nothing DescriptionField]) , testParser "%20(date)" (defaultStringFormatStyle [FormatField False (Just 20) Nothing DescriptionField]) , testParser "%.10(date)" (defaultStringFormatStyle [FormatField False Nothing (Just 10) DescriptionField]) , testParser "%20.10(date)" (defaultStringFormatStyle [FormatField False (Just 20) (Just 10) DescriptionField]) , testParser "%20(account) %.10(total)\n" (defaultStringFormatStyle [FormatField False (Just 20) Nothing AccountField , FormatLiteral " " , FormatField False Nothing (Just 10) TotalField , FormatLiteral "\n" ]) ] hledger-lib-1.2/Hledger/Data/Posting.hs0000644000000000000000000003000113066173044016120 0ustar0000000000000000{-| A 'Posting' represents a change (by some 'MixedAmount') of the balance in some 'Account'. Each 'Transaction' contains two or more postings which should add up to 0. Postings reference their parent transaction, so we can look up the date or description there. -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Posting ( -- * Posting nullposting, posting, post, -- * operations originalPosting, postingStatus, isReal, isVirtual, isBalancedVirtual, isEmptyPosting, isAssignment, hasAmount, postingAllTags, transactionAllTags, postingAllImplicitTags, relatedPostings, removePrices, -- * date operations postingDate, postingDate2, isPostingInDateSpan, isPostingInDateSpan', postingsDateSpan, postingsDateSpan', -- * account name operations accountNamesFromPostings, accountNamePostingType, accountNameWithoutPostingType, accountNameWithPostingType, joinAccountNames, concatAccountNames, accountNameApplyAliases, accountNameApplyAliasesMemo, -- * transaction description operations transactionPayee, transactionNote, payeeAndNoteFromDescription, -- * arithmetic sumPostings, -- * rendering showPosting, -- * misc. showComment, tests_Hledger_Data_Posting ) where import Data.List import Data.Maybe import Data.MemoUgly (memo) import Data.Monoid import Data.Ord import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe import Test.HUnit import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Amount import Hledger.Data.AccountName import Hledger.Data.Dates (nulldate, spanContainsDate) instance Show Posting where show = showPosting nullposting, posting :: Posting nullposting = Posting {pdate=Nothing ,pdate2=Nothing ,pstatus=Uncleared ,paccount="" ,pamount=nullmixedamt ,pcomment="" ,ptype=RegularPosting ,ptags=[] ,pbalanceassertion=Nothing ,ptransaction=Nothing ,porigin=Nothing } posting = nullposting post :: AccountName -> Amount -> Posting post acct amt = posting {paccount=acct, pamount=Mixed [amt]} originalPosting :: Posting -> Posting originalPosting p = fromMaybe p $ porigin p -- XXX once rendered user output, but just for debugging now; clean up showPosting :: Posting -> String showPosting p@Posting{paccount=a,pamount=amt,ptype=t} = unlines $ [concatTopPadded [show (postingDate p) ++ " ", showaccountname a ++ " ", showamount amt, showComment (pcomment p)]] where ledger3ishlayout = False acctnamewidth = if ledger3ishlayout then 25 else 22 showaccountname = fitString (Just acctnamewidth) Nothing False False . bracket . T.unpack . 
elideAccountName width (bracket,width) = case t of BalancedVirtualPosting -> (\s -> "["++s++"]", acctnamewidth-2) VirtualPosting -> (\s -> "("++s++")", acctnamewidth-2) _ -> (id,acctnamewidth) showamount = padLeftWide 12 . showMixedAmount showComment :: Text -> String showComment t = if T.null t then "" else " ;" ++ T.unpack t isReal :: Posting -> Bool isReal p = ptype p == RegularPosting isVirtual :: Posting -> Bool isVirtual p = ptype p == VirtualPosting isBalancedVirtual :: Posting -> Bool isBalancedVirtual p = ptype p == BalancedVirtualPosting hasAmount :: Posting -> Bool hasAmount = (/= missingmixedamt) . pamount isAssignment :: Posting -> Bool isAssignment p = not (hasAmount p) && isJust (pbalanceassertion p) accountNamesFromPostings :: [Posting] -> [AccountName] accountNamesFromPostings = nub . map paccount sumPostings :: [Posting] -> MixedAmount sumPostings = sumStrict . map pamount -- | Remove all prices of a posting removePrices :: Posting -> Posting removePrices p = p{ pamount = Mixed $ remove <$> amounts (pamount p) } where remove a = a { aprice = NoPrice } -- | Get a posting's (primary) date - it's own primary date if specified, -- otherwise the parent transaction's primary date, or the null date if -- there is no parent transaction. postingDate :: Posting -> Day postingDate p = fromMaybe txndate $ pdate p where txndate = maybe nulldate tdate $ ptransaction p -- | Get a posting's secondary (secondary) date, which is the first of: -- posting's secondary date, transaction's secondary date, posting's -- primary date, transaction's primary date, or the null date if there is -- no parent transaction. postingDate2 :: Posting -> Day postingDate2 p = headDef nulldate $ catMaybes dates where dates = [pdate2 p ,maybe Nothing tdate2 $ ptransaction p ,pdate p ,maybe Nothing (Just . tdate) $ ptransaction p ] -- | Get a posting's cleared status: cleared or pending if those are -- explicitly set, otherwise the cleared status of its parent -- transaction, or uncleared if there is no parent transaction. (Note -- Uncleared's ambiguity, it can mean "uncleared" or "don't know". postingStatus :: Posting -> ClearedStatus postingStatus Posting{pstatus=s, ptransaction=mt} | s == Uncleared = case mt of Just t -> tstatus t Nothing -> Uncleared | otherwise = s -- | Implicit tags for this transaction. transactionImplicitTags :: Transaction -> [Tag] transactionImplicitTags t = filter (not . T.null . snd) [("code", tcode t) ,("description", tdescription t) ,("payee", transactionPayee t) ,("note", transactionNote t) ] transactionPayee :: Transaction -> Text transactionPayee = fst . payeeAndNoteFromDescription . tdescription transactionNote :: Transaction -> Text transactionNote = fst . payeeAndNoteFromDescription . tdescription -- | Parse a transaction's description into payee and note (aka narration) fields, -- assuming a convention of separating these with | (like Beancount). -- Ie, everything up to the first | is the payee, everything after it is the note. -- When there's no |, payee == note == description. payeeAndNoteFromDescription :: Text -> (Text,Text) payeeAndNoteFromDescription t = (textstrip p, textstrip $ T.tail n) where (p,n) = T.breakOn "|" t -- | Tags for this posting including implicit and any inherited from its parent transaction. 
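-- Illustrative example (not from the original source, OverloadedStrings
-- assumed as in this module) of the payee/note split described above:
--
-- >>> payeeAndNoteFromDescription "Acme Corp | invoice 42"
-- ("Acme Corp","invoice 42")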
postingAllImplicitTags :: Posting -> [Tag] postingAllImplicitTags p = ptags p ++ maybe [] transactionTags (ptransaction p) where transactionTags t = ttags t ++ transactionImplicitTags t -- | Tags for this posting including any inherited from its parent transaction. postingAllTags :: Posting -> [Tag] postingAllTags p = ptags p ++ maybe [] ttags (ptransaction p) -- | Tags for this transaction including any from its postings. transactionAllTags :: Transaction -> [Tag] transactionAllTags t = ttags t ++ concatMap ptags (tpostings t) -- Get the other postings from this posting's transaction. relatedPostings :: Posting -> [Posting] relatedPostings p@Posting{ptransaction=Just t} = filter (/= p) $ tpostings t relatedPostings _ = [] -- | Does this posting fall within the given date span ? isPostingInDateSpan :: DateSpan -> Posting -> Bool isPostingInDateSpan s = spanContainsDate s . postingDate -- --date2-sensitive version, separate for now to avoid disturbing multiBalanceReport. isPostingInDateSpan' :: WhichDate -> DateSpan -> Posting -> Bool isPostingInDateSpan' PrimaryDate s = spanContainsDate s . postingDate isPostingInDateSpan' SecondaryDate s = spanContainsDate s . postingDate2 isEmptyPosting :: Posting -> Bool isEmptyPosting = isZeroMixedAmount . pamount -- | Get the minimal date span which contains all the postings, or the -- null date span if there are none. postingsDateSpan :: [Posting] -> DateSpan postingsDateSpan [] = DateSpan Nothing Nothing postingsDateSpan ps = DateSpan (Just $ postingDate $ head ps') (Just $ addDays 1 $ postingDate $ last ps') where ps' = sortBy (comparing postingDate) ps -- --date2-sensitive version, as above. postingsDateSpan' :: WhichDate -> [Posting] -> DateSpan postingsDateSpan' _ [] = DateSpan Nothing Nothing postingsDateSpan' wd ps = DateSpan (Just $ postingdate $ head ps') (Just $ addDays 1 $ postingdate $ last ps') where ps' = sortBy (comparing postingdate) ps postingdate = if wd == PrimaryDate then postingDate else postingDate2 -- AccountName stuff that depends on PostingType accountNamePostingType :: AccountName -> PostingType accountNamePostingType a | T.null a = RegularPosting | T.head a == '[' && T.last a == ']' = BalancedVirtualPosting | T.head a == '(' && T.last a == ')' = VirtualPosting | otherwise = RegularPosting accountNameWithoutPostingType :: AccountName -> AccountName accountNameWithoutPostingType a = case accountNamePostingType a of BalancedVirtualPosting -> T.init $ T.tail a VirtualPosting -> T.init $ T.tail a RegularPosting -> a accountNameWithPostingType :: PostingType -> AccountName -> AccountName accountNameWithPostingType BalancedVirtualPosting a = "["<>accountNameWithoutPostingType a<>"]" accountNameWithPostingType VirtualPosting a = "("<>accountNameWithoutPostingType a<>")" accountNameWithPostingType RegularPosting a = accountNameWithoutPostingType a -- | Prefix one account name to another, preserving posting type -- indicators like concatAccountNames. joinAccountNames :: AccountName -> AccountName -> AccountName joinAccountNames a b = concatAccountNames $ filter (not . T.null) [a,b] -- | Join account names into one. If any of them has () or [] posting type -- indicators, these (the first type encountered) will also be applied to -- the resulting account name. 
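-- Illustrative examples (not from the original source, OverloadedStrings
-- assumed as in this module) of the posting-type bracketing helpers above:
--
-- >>> accountNameWithPostingType VirtualPosting "assets:cash"
-- "(assets:cash)"
-- >>> accountNameWithoutPostingType "[assets:cash]"
-- "assets:cash"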
concatAccountNames :: [AccountName] -> AccountName concatAccountNames as = accountNameWithPostingType t $ T.intercalate ":" $ map accountNameWithoutPostingType as where t = headDef RegularPosting $ filter (/= RegularPosting) $ map accountNamePostingType as -- | Rewrite an account name using all matching aliases from the given list, in sequence. -- Each alias sees the result of applying the previous aliases. accountNameApplyAliases :: [AccountAlias] -> AccountName -> AccountName accountNameApplyAliases aliases a = accountNameWithPostingType atype aname' where (aname,atype) = (accountNameWithoutPostingType a, accountNamePostingType a) aname' = foldl (\acct alias -> dbg6 "result" $ aliasReplace (dbg6 "alias" alias) (dbg6 "account" acct)) aname aliases -- | Memoising version of accountNameApplyAliases, maybe overkill. accountNameApplyAliasesMemo :: [AccountAlias] -> AccountName -> AccountName accountNameApplyAliasesMemo aliases = memo (accountNameApplyAliases aliases) -- aliasMatches :: AccountAlias -> AccountName -> Bool -- aliasMatches (BasicAlias old _) a = old `isAccountNamePrefixOf` a -- aliasMatches (RegexAlias re _) a = regexMatchesCI re a aliasReplace :: AccountAlias -> AccountName -> AccountName aliasReplace (BasicAlias old new) a | old `isAccountNamePrefixOf` a || old == a = new <> T.drop (T.length old) a | otherwise = a aliasReplace (RegexAlias re repl) a = T.pack $ regexReplaceCIMemo re repl $ T.unpack a -- XXX tests_Hledger_Data_Posting = TestList [ "accountNamePostingType" ~: do accountNamePostingType "a" `is` RegularPosting accountNamePostingType "(a)" `is` VirtualPosting accountNamePostingType "[a]" `is` BalancedVirtualPosting ,"accountNameWithoutPostingType" ~: do accountNameWithoutPostingType "(a)" `is` "a" ,"accountNameWithPostingType" ~: do accountNameWithPostingType VirtualPosting "[a]" `is` "(a)" ,"joinAccountNames" ~: do "a" `joinAccountNames` "b:c" `is` "a:b:c" "a" `joinAccountNames` "(b:c)" `is` "(a:b:c)" "[a]" `joinAccountNames` "(b:c)" `is` "[a:b:c]" "" `joinAccountNames` "a" `is` "a" ,"concatAccountNames" ~: do concatAccountNames [] `is` "" concatAccountNames ["a","(b)","[c:d]"] `is` "(a:b:c:d)" ] hledger-lib-1.2/Hledger/Data/RawOptions.hs0000644000000000000000000000324613066746043016622 0ustar0000000000000000{-| hledger's cmdargs modes parse command-line arguments to an intermediate format, RawOpts (an association list), rather than a fixed ADT like CliOpts. This allows the modes and flags to be reused more easily by hledger commands/scripts in this and other packages. -} module Hledger.Data.RawOptions ( RawOpts, setopt, setboolopt, inRawOpts, boolopt, stringopt, maybestringopt, listofstringopt, intopt, maybeintopt ) where import Data.Maybe import qualified Data.Text as T import Safe import Hledger.Utils -- | The result of running cmdargs: an association list of option names to string values. type RawOpts = [(String,String)] setopt :: String -> String -> RawOpts -> RawOpts setopt name val = (++ [(name, quoteIfNeeded $ val)]) setboolopt :: String -> RawOpts -> RawOpts setboolopt name = (++ [(name,"")]) -- | Is the named option present ? inRawOpts :: String -> RawOpts -> Bool inRawOpts name = isJust . lookup name boolopt :: String -> RawOpts -> Bool boolopt = inRawOpts maybestringopt :: String -> RawOpts -> Maybe String maybestringopt name = maybe Nothing (Just . T.unpack . stripquotes . T.pack) . lookup name . reverse stringopt :: String -> RawOpts -> String stringopt name = fromMaybe "" . 
maybestringopt name listofstringopt :: String -> RawOpts -> [String] listofstringopt name rawopts = [v | (k,v) <- rawopts, k==name] maybeintopt :: String -> RawOpts -> Maybe Int maybeintopt name rawopts = let ms = maybestringopt name rawopts in case ms of Nothing -> Nothing Just s -> Just $ readDef (usageError $ "could not parse "++name++" number: "++s) s intopt :: String -> RawOpts -> Int intopt name = fromMaybe 0 . maybeintopt name hledger-lib-1.2/Hledger/Data/Timeclock.hs0000644000000000000000000001350213035210046016404 0ustar0000000000000000{-| A 'TimeclockEntry' is a clock-in, clock-out, or other directive in a timeclock file (see timeclock.el or the command-line version). These can be converted to 'Transactions' and queried like a ledger. -} {-# LANGUAGE CPP, OverloadedStrings #-} module Hledger.Data.Timeclock where import Data.Maybe -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.Clock import Data.Time.Format import Data.Time.LocalTime #if !(MIN_VERSION_time(1,5,0)) import System.Locale (defaultTimeLocale) #endif import Test.HUnit import Text.Printf import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Posting import Hledger.Data.Transaction instance Show TimeclockEntry where show t = printf "%s %s %s %s" (show $ tlcode t) (show $ tldatetime t) (tlaccount t) (tldescription t) instance Show TimeclockCode where show SetBalance = "b" show SetRequiredHours = "h" show In = "i" show Out = "o" show FinalOut = "O" instance Read TimeclockCode where readsPrec _ ('b' : xs) = [(SetBalance, xs)] readsPrec _ ('h' : xs) = [(SetRequiredHours, xs)] readsPrec _ ('i' : xs) = [(In, xs)] readsPrec _ ('o' : xs) = [(Out, xs)] readsPrec _ ('O' : xs) = [(FinalOut, xs)] readsPrec _ _ = [] -- | Convert time log entries to journal transactions. When there is no -- clockout, add one with the provided current time. Sessions crossing -- midnight are split into days to give accurate per-day totals. timeclockEntriesToTransactions :: LocalTime -> [TimeclockEntry] -> [Transaction] timeclockEntriesToTransactions _ [] = [] timeclockEntriesToTransactions now [i] | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now [i',o] | otherwise = [entryFromTimeclockInOut i o] where o = TimeclockEntry (tlsourcepos i) Out end "" "" end = if itime > now then itime else now (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} timeclockEntriesToTransactions now (i:o:rest) | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now (i':o:rest) | otherwise = entryFromTimeclockInOut i o : timeclockEntriesToTransactions now rest where (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} -- | Convert a timeclock clockin and clockout entry to an equivalent journal -- transaction, representing the time expenditure. Note this entry is not balanced, -- since we omit the \"assets:time\" transaction for simpler output. 
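-- A minimal sketch (illustrative, not part of the original module, assuming
-- this module's imports and OverloadedStrings): one same-day clock-in/clock-out
-- pair becomes a single transaction via timeclockEntriesToTransactions; the
-- "now" argument is only used to close a missing final clock-out.
exampleSession :: [Transaction]
exampleSession = timeclockEntriesToTransactions now entries
  where
    now     = LocalTime (fromGregorian 2017 3 1) (TimeOfDay 18 0 0)
    entries = [ TimeclockEntry nullsourcepos In  (LocalTime (fromGregorian 2017 3 1) (TimeOfDay 9 0 0))  "client:acme" "morning work"
              , TimeclockEntry nullsourcepos Out (LocalTime (fromGregorian 2017 3 1) (TimeOfDay 12 30 0)) "" ""
              ]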
entryFromTimeclockInOut :: TimeclockEntry -> TimeclockEntry -> Transaction entryFromTimeclockInOut i o | otime >= itime = t | otherwise = error' $ "clock-out time less than clock-in time in:\n" ++ showTransaction t where t = Transaction { tindex = 0, tsourcepos = tlsourcepos i, tdate = idate, tdate2 = Nothing, tstatus = Cleared, tcode = "", tdescription = desc, tcomment = "", ttags = [], tpostings = ps, tpreceding_comment_lines="" } itime = tldatetime i otime = tldatetime o itod = localTimeOfDay itime otod = localTimeOfDay otime idate = localDay itime desc | T.null (tldescription i) = T.pack $ showtime itod ++ "-" ++ showtime otod | otherwise = tldescription i showtime = take 5 . show hours = elapsedSeconds (toutc otime) (toutc itime) / 3600 where toutc = localTimeToUTC utc acctname = tlaccount i amount = Mixed [hrs hours] ps = [posting{paccount=acctname, pamount=amount, ptype=VirtualPosting, ptransaction=Just t}] tests_Hledger_Data_Timeclock = TestList [ "timeclockEntriesToTransactions" ~: do today <- getCurrentDay now' <- getCurrentTime tz <- getCurrentTimeZone let now = utcToLocalTime tz now' nowstr = showtime now yesterday = prevday today clockin = TimeclockEntry nullsourcepos In mktime d = LocalTime d . fromMaybe midnight . #if MIN_VERSION_time(1,5,0) parseTimeM True defaultTimeLocale "%H:%M:%S" #else parseTime defaultTimeLocale "%H:%M:%S" #endif showtime = formatTime defaultTimeLocale "%H:%M" assertEntriesGiveStrings name es ss = assertEqual name ss (map (T.unpack . tdescription) $ timeclockEntriesToTransactions now es) assertEntriesGiveStrings "started yesterday, split session at midnight" [clockin (mktime yesterday "23:00:00") "" ""] ["23:00-23:59","00:00-"++nowstr] assertEntriesGiveStrings "split multi-day sessions at each midnight" [clockin (mktime (addDays (-2) today) "23:00:00") "" ""] ["23:00-23:59","00:00-23:59","00:00-"++nowstr] assertEntriesGiveStrings "auto-clock-out if needed" [clockin (mktime today "00:00:00") "" ""] ["00:00-"++nowstr] let future = utcToLocalTime tz $ addUTCTime 100 now' futurestr = showtime future assertEntriesGiveStrings "use the clockin time for auto-clockout if it's in the future" [clockin future "" ""] [printf "%s-%s" futurestr futurestr] ] hledger-lib-1.2/Hledger/Data/Transaction.hs0000644000000000000000000007435613066173044017007 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-| A 'Transaction' represents a movement of some commodity(ies) between two or more accounts. It consists of multiple account 'Posting's which balance to zero, a date, and optional extras like description, cleared status, and tags. -} {-# LANGUAGE OverloadedStrings, LambdaCase #-} module Hledger.Data.Transaction ( -- * Transaction nullsourcepos, nulltransaction, txnTieKnot, txnUntieKnot, -- * operations showAccountName, hasRealPostings, realPostings, assignmentPostings, virtualPostings, balancedVirtualPostings, transactionsPostings, isTransactionBalanced, -- nonzerobalanceerror, -- * date operations transactionDate2, -- * arithmetic transactionPostingBalances, balanceTransaction, balanceTransactionUpdate, -- * rendering showTransaction, showTransactionUnelided, showTransactionUnelidedOneLineAmounts, showPostingLine, showPostingLines, -- * GenericSourcePos sourceFilePath, sourceFirstLine, showGenericSourcePos, -- * misc. 
tests_Hledger_Data_Transaction ) where import Data.List import Control.Monad.Except import Control.Monad.Identity import Data.Maybe import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Test.HUnit import Text.Printf import qualified Data.Map as Map import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Amount instance Show Transaction where show = showTransactionUnelided instance Show ModifierTransaction where show t = "= " ++ T.unpack (mtvalueexpr t) ++ "\n" ++ unlines (map show (mtpostings t)) instance Show PeriodicTransaction where show t = "~ " ++ T.unpack (ptperiodicexpr t) ++ "\n" ++ unlines (map show (ptpostings t)) sourceFilePath :: GenericSourcePos -> FilePath sourceFilePath = \case GenericSourcePos fp _ _ -> fp JournalSourcePos fp _ -> fp sourceFirstLine :: GenericSourcePos -> Int sourceFirstLine = \case GenericSourcePos _ line _ -> line JournalSourcePos _ (line, _) -> line showGenericSourcePos :: GenericSourcePos -> String showGenericSourcePos = \case GenericSourcePos fp line column -> show fp ++ " (line " ++ show line ++ ", column " ++ show column ++ ")" JournalSourcePos fp (line, line') -> show fp ++ " (lines " ++ show line ++ "-" ++ show line' ++ ")" nullsourcepos :: GenericSourcePos nullsourcepos = GenericSourcePos "" 1 1 nulltransaction :: Transaction nulltransaction = Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=nulldate, tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="", tcomment="", ttags=[], tpostings=[], tpreceding_comment_lines="" } {-| Show a journal transaction, formatted for the print command. ledger 2.x's standard format looks like this: @ yyyy/mm/dd[ *][ CODE] description......... [ ; comment...............] account name 1..................... ...$amount1[ ; comment...............] account name 2..................... ..$-amount1[ ; comment...............] pcodewidth = no limit -- 10 -- mimicking ledger layout. pdescwidth = no limit -- 20 -- I don't remember what these mean, pacctwidth = 35 minimum, no maximum -- they were important at the time. 
pamtwidth = 11 pcommentwidth = no limit -- 22 @ -} showTransaction :: Transaction -> String showTransaction = showTransactionHelper True False showTransactionUnelided :: Transaction -> String showTransactionUnelided = showTransactionHelper False False tests_showTransactionUnelided = [ "showTransactionUnelided" ~: do let t `gives` s = assertEqual "" s (showTransactionUnelided t) nulltransaction `gives` "0000/01/01\n\n" nulltransaction{ tdate=parsedate "2012/05/14", tdate2=Just $ parsedate "2012/05/15", tstatus=Uncleared, tcode="code", tdescription="desc", tcomment="tcomment1\ntcomment2\n", ttags=[("ttag1","val1")], tpostings=[ nullposting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1, hrs 2], pcomment="\npcomment2\n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")] } ] } `gives` unlines [ "2012/05/14=2012/05/15 (code) desc ; tcomment1", " ; tcomment2", " * a $1.00", " ; pcomment2", " * a 2.00h", " ; pcomment2", "" ] ] showTransactionUnelidedOneLineAmounts :: Transaction -> String showTransactionUnelidedOneLineAmounts = showTransactionHelper False True -- cf showPosting showTransactionHelper :: Bool -> Bool -> Transaction -> String showTransactionHelper elide onelineamounts t = unlines $ [descriptionline] ++ newlinecomments ++ (postingsAsLines elide onelineamounts t (tpostings t)) ++ [""] where descriptionline = rstrip $ concat [date, status, code, desc, samelinecomment] date = showDate (tdate t) ++ maybe "" (("="++) . showDate) (tdate2 t) status | tstatus t == Cleared = " *" | tstatus t == Pending = " !" | otherwise = "" code = if T.length (tcode t) > 0 then printf " (%s)" $ T.unpack $ tcode t else "" desc = if null d then "" else " " ++ d where d = T.unpack $ tdescription t (samelinecomment, newlinecomments) = case renderCommentLines (tcomment t) of [] -> ("",[]) c:cs -> (c,cs) -- Render a transaction or posting's comment as indented, semicolon-prefixed comment lines. renderCommentLines :: Text -> [String] renderCommentLines t = case lines $ T.unpack t of ("":ls) -> "":map commentprefix ls ls -> map commentprefix ls where commentprefix = indent . ("; "++) -- -- Render a transaction or posting's comment as semicolon-prefixed comment lines - -- -- an inline (same-line) comment if it's a single line, otherwise multiple indented lines. -- commentLines' :: String -> (String, [String]) -- commentLines' s -- | null s = ("", []) -- | length ls == 1 = (prefix $ head ls, []) -- | otherwise = ("", (prefix $ head ls):(map prefix $ tail ls)) -- where -- ls = lines s -- prefix = indent . (";"++) postingsAsLines :: Bool -> Bool -> Transaction -> [Posting] -> [String] postingsAsLines elide onelineamounts t ps | elide && length ps > 1 && isTransactionBalanced Nothing t -- imprecise balanced check = (concatMap (postingAsLines False onelineamounts ps) $ init ps) ++ postingAsLines True onelineamounts ps (last ps) | otherwise = concatMap (postingAsLines False onelineamounts ps) ps postingAsLines :: Bool -> Bool -> [Posting] -> Posting -> [String] postingAsLines elideamount onelineamounts ps p = concat [ postingblock ++ newlinecomments | postingblock <- postingblocks] where postingblocks = [map rstrip $ lines $ concatTopPadded [account, " ", amount, assertion, samelinecomment] | amount <- shownAmounts] assertion = maybe "" ((" = " ++) . 
showAmountWithZeroCommodity) $ pbalanceassertion p account = indent $ showstatus p ++ fitString (Just acctwidth) Nothing False True (showAccountName Nothing (ptype p) (paccount p)) where showstatus p = if pstatus p == Cleared then "* " else "" acctwidth = maximum $ map (textWidth . paccount) ps -- currently prices are considered part of the amount string when right-aligning amounts shownAmounts | elideamount = [""] | onelineamounts = [fitString (Just amtwidth) Nothing False False $ showMixedAmountOneLine $ pamount p] | otherwise = map (fitStringMulti (Just amtwidth) Nothing False False . showAmount ) . amounts $ pamount p where amtwidth = maximum $ 12 : map (strWidth . showMixedAmount . pamount) ps -- min. 12 for backwards compatibility (samelinecomment, newlinecomments) = case renderCommentLines (pcomment p) of [] -> ("",[]) c:cs -> (c,cs) -- used in balance assertion error showPostingLine p = indent $ if pstatus p == Cleared then "* " else "" ++ showAccountName Nothing (ptype p) (paccount p) ++ " " ++ showMixedAmountOneLine (pamount p) -- | Produce posting line with all comment lines associated with it showPostingLines :: Posting -> [String] showPostingLines p = postingAsLines False False ps p where ps | Just t <- ptransaction p = tpostings t | otherwise = [p] tests_postingAsLines = [ "postingAsLines" ~: do let p `gives` ls = assertEqual (show p) ls (postingAsLines False False [p] p) posting `gives` [] posting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1, hrs 2], pcomment="pcomment1\npcomment2\n tag3: val3 \n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")] } `gives` [ " * a $1.00 ; pcomment1", " ; pcomment2", " ; tag3: val3 ", " * a 2.00h ; pcomment1", " ; pcomment2", " ; tag3: val3 " ] ] tests_inference = [ "inferBalancingAmount" ~: do let p `gives` p' = assertEqual (show p) (Right p') $ inferTransaction p inferTransaction :: Transaction -> Either String Transaction inferTransaction = runIdentity . runExceptT . inferBalancingAmount (\_ _ -> return ()) nulltransaction `gives` nulltransaction nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` missingamt ]} `gives` nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` usd 5 ]} nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` missingamt ]} `gives` nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` usd 1 ]} ] indent :: String -> String indent = (" "++) -- | Show an account name, clipped to the given width if any, and -- appropriately bracketed/parenthesised for the given posting type. showAccountName :: Maybe Int -> PostingType -> AccountName -> String showAccountName w = fmt where fmt RegularPosting = take w' . T.unpack fmt VirtualPosting = parenthesise . reverse . take (w'-2) . reverse . T.unpack fmt BalancedVirtualPosting = bracket . reverse . take (w'-2) . reverse . T.unpack w' = fromMaybe 999999 w parenthesise :: String -> String parenthesise s = "("++s++")" bracket :: String -> String bracket s = "["++s++"]" hasRealPostings :: Transaction -> Bool hasRealPostings = not . null . realPostings realPostings :: Transaction -> [Posting] realPostings = filter isReal . tpostings assignmentPostings :: Transaction -> [Posting] assignmentPostings = filter isAssignment . tpostings virtualPostings :: Transaction -> [Posting] virtualPostings = filter isVirtual . tpostings balancedVirtualPostings :: Transaction -> [Posting] balancedVirtualPostings = filter isBalancedVirtual . 
tpostings transactionsPostings :: [Transaction] -> [Posting] transactionsPostings = concat . map tpostings -- | Get the sums of a transaction's real, virtual, and balanced virtual postings. transactionPostingBalances :: Transaction -> (MixedAmount,MixedAmount,MixedAmount) transactionPostingBalances t = (sumPostings $ realPostings t ,sumPostings $ virtualPostings t ,sumPostings $ balancedVirtualPostings t) -- | Is this transaction balanced ? A balanced transaction's real -- (non-virtual) postings sum to 0, and any balanced virtual postings -- also sum to 0. isTransactionBalanced :: Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> Bool isTransactionBalanced styles t = -- isReallyZeroMixedAmountCost rsum && isReallyZeroMixedAmountCost bvsum isZeroMixedAmount rsum' && isZeroMixedAmount bvsum' where (rsum, _, bvsum) = transactionPostingBalances t rsum' = canonicalise $ costOfMixedAmount rsum bvsum' = canonicalise $ costOfMixedAmount bvsum canonicalise = maybe id canonicaliseMixedAmount styles -- | Ensure this transaction is balanced, possibly inferring a missing -- amount or conversion price(s), or return an error message. -- Balancing is affected by commodity display precisions, so those can -- (optionally) be provided. -- -- this fails for example, if there are several missing amounts -- (possibly with balance assignments) balanceTransaction :: Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> Either String Transaction balanceTransaction stylemap = runIdentity . runExceptT . balanceTransactionUpdate (\_ _ -> return ()) stylemap -- | More general version of 'balanceTransaction' that takes an update -- function balanceTransactionUpdate :: MonadError String m => (AccountName -> MixedAmount -> m ()) -- ^ update function -> Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> m Transaction balanceTransactionUpdate update styles t = finalize =<< inferBalancingAmount update t where finalize t' = let t'' = inferBalancingPrices t' in if isTransactionBalanced styles t'' then return $ txnTieKnot t'' else throwError $ printerr $ nonzerobalanceerror t'' printerr s = intercalate "\n" [s, showTransactionUnelided t] nonzerobalanceerror :: Transaction -> String nonzerobalanceerror t = printf "could not balance this transaction (%s%s%s)" rmsg sep bvmsg where (rsum, _, bvsum) = transactionPostingBalances t rmsg | isReallyZeroMixedAmountCost rsum = "" | otherwise = "real postings are off by " ++ showMixedAmount (costOfMixedAmount rsum) bvmsg | isReallyZeroMixedAmountCost bvsum = "" | otherwise = "balanced virtual postings are off by " ++ showMixedAmount (costOfMixedAmount bvsum) sep = if not (null rmsg) && not (null bvmsg) then "; " else "" :: String -- | Infer up to one missing amount for this transactions's real postings, and -- likewise for its balanced virtual postings, if needed; or return an error -- message if we can't. -- -- We can infer a missing amount when there are multiple postings and exactly -- one of them is amountless. If the amounts had price(s) the inferred amount -- have the same price(s), and will be converted to the price commodity. 
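-- A minimal sketch (illustrative, not part of the original module,
-- OverloadedStrings as in this module): with exactly one amountless posting,
-- balanceTransaction infers the balancing amount, so assets:checking below
-- comes back as $-5.00.
balancedExample :: Either String Transaction
balancedExample = balanceTransaction Nothing nulltransaction
  { tdate        = parsedate "2017/03/01"
  , tdescription = "coffee beans"
  , tpostings    = [ "expenses:food"   `post` usd 5
                   , "assets:checking" `post` missingamt
                   ]
  }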
inferBalancingAmount :: MonadError String m => (AccountName -> MixedAmount -> m ()) -- ^ update function -> Transaction -> m Transaction inferBalancingAmount update t@Transaction{tpostings=ps} | length amountlessrealps > 1 = throwError $ printerr "could not balance this transaction - can't have more than one real posting with no amount (remember to put 2 or more spaces before amounts)" | length amountlessbvps > 1 = throwError $ printerr "could not balance this transaction - can't have more than one balanced virtual posting with no amount (remember to put 2 or more spaces before amounts)" | otherwise = do postings <- mapM inferamount ps return t{tpostings=postings} where printerr s = intercalate "\n" [s, showTransactionUnelided t] (amountfulrealps, amountlessrealps) = partition hasAmount (realPostings t) realsum = sumStrict $ map pamount amountfulrealps (amountfulbvps, amountlessbvps) = partition hasAmount (balancedVirtualPostings t) bvsum = sumStrict $ map pamount amountfulbvps inferamount p@Posting{ptype=RegularPosting} | not (hasAmount p) = updateAmount p realsum inferamount p@Posting{ptype=BalancedVirtualPosting} | not (hasAmount p) = updateAmount p bvsum inferamount p = return p updateAmount p amt = update (paccount p) amt' >> return p { pamount=amt', porigin=Just $ originalPosting p } where amt' = normaliseMixedAmount $ costOfMixedAmount (-amt) -- | Infer prices for this transaction's posting amounts, if needed to make -- the postings balance, and if possible. This is done once for the real -- postings and again (separately) for the balanced virtual postings. When -- it's not possible, the transaction is left unchanged. -- -- The simplest example is a transaction with two postings, each in a -- different commodity, with no prices specified. In this case we'll add a -- price to the first posting such that it can be converted to the commodity -- of the second posting (with -B), and such that the postings balance. -- -- In general, we can infer a conversion price when the sum of posting amounts -- contains exactly two different commodities and no explicit prices. Also -- all postings are expected to contain an explicit amount (no missing -- amounts) in a single commodity. Otherwise no price inferring is attempted. -- -- The transaction itself could contain more than two commodities, and/or -- prices, if they cancel out; what matters is that the sum of posting amounts -- contains exactly two commodities and zero prices. -- -- There can also be more than two postings in either of the commodities. -- -- We want to avoid excessive display of digits when the calculated price is -- an irrational number, while hopefully also ensuring the displayed numbers -- make sense if the user does a manual calculation. This is (mostly) achieved -- in two ways: -- -- - when there is only one posting in the "from" commodity, a total price -- (@@) is used, and all available decimal digits are shown -- -- - otherwise, a suitable averaged unit price (@) is applied to the relevant -- postings, with display precision equal to the summed display precisions -- of the two commodities being converted between, or 2, whichever is larger. -- -- (We don't always calculate a good-looking display precision for unit prices -- when the commodity display precisions are low, eg when a journal doesn't -- use any decimal places. The minimum of 2 helps make the prices shown by the -- print command a bit less surprising in this case. Could do better.) 
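-- A minimal sketch (illustrative, not part of the original module): with
-- exactly two commodities and no explicit prices, price inference attaches a
-- conversion price as described above; here the $1.35 posting should come
-- back with a total price (@@ eur 1), as in the balanceTransaction tests below.
pricedExample :: Either String Transaction
pricedExample = balanceTransaction Nothing nulltransaction
  { tdate     = parsedate "2017/03/01"
  , tpostings = [ "assets:usd" `post` usd 1.35
                , "assets:eur" `post` eur (-1)
                ]
  }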
-- inferBalancingPrices :: Transaction -> Transaction inferBalancingPrices t@Transaction{tpostings=ps} = t{tpostings=ps'} where ps' = map (priceInferrerFor t BalancedVirtualPosting) $ map (priceInferrerFor t RegularPosting) $ ps -- | Generate a posting update function which assigns a suitable balancing -- price to the posting, if and as appropriate for the given transaction and -- posting type (real or balanced virtual). priceInferrerFor :: Transaction -> PostingType -> (Posting -> Posting) priceInferrerFor t pt = inferprice where postings = filter ((==pt).ptype) $ tpostings t pmixedamounts = map pamount postings pamounts = concatMap amounts pmixedamounts pcommodities = map acommodity pamounts sumamounts = amounts $ sumStrict pmixedamounts -- sum normalises to one amount per commodity & price sumcommodities = map acommodity sumamounts sumprices = filter (/=NoPrice) $ map aprice sumamounts caninferprices = length sumcommodities == 2 && null sumprices inferprice p@Posting{pamount=Mixed [a]} | caninferprices && ptype p == pt && acommodity a == fromcommodity = p{pamount=Mixed [a{aprice=conversionprice}], porigin=Just $ originalPosting p} where fromcommodity = head $ filter (`elem` sumcommodities) pcommodities -- these heads are ugly but should be safe conversionprice | fromcount==1 = TotalPrice $ abs toamount `withPrecision` maxprecision | otherwise = UnitPrice $ abs unitprice `withPrecision` unitprecision where fromcount = length $ filter ((==fromcommodity).acommodity) pamounts fromamount = head $ filter ((==fromcommodity).acommodity) sumamounts tocommodity = head $ filter (/=fromcommodity) sumcommodities toamount = head $ filter ((==tocommodity).acommodity) sumamounts unitprice = toamount `divideAmount` (aquantity fromamount) unitprecision = max 2 ((asprecision $ astyle $ toamount) + (asprecision $ astyle $ fromamount)) inferprice p = p -- Get a transaction's secondary date, defaulting to the primary date. transactionDate2 :: Transaction -> Day transactionDate2 t = fromMaybe (tdate t) $ tdate2 t -- | Ensure a transaction's postings refer back to it, so that eg -- relatedPostings works right. txnTieKnot :: Transaction -> Transaction txnTieKnot t@Transaction{tpostings=ps} = t' where t' = t{tpostings=map (postingSetTransaction t') ps} -- | Ensure a transaction's postings do not refer back to it, so that eg -- recursiveSize and GHCI's :sprint work right. txnUntieKnot :: Transaction -> Transaction txnUntieKnot t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Set a posting's parent transaction. 
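-- A minimal sketch (illustrative, not part of the original module): txnTieKnot
-- makes each posting refer back to its parent transaction, so posting-level
-- accessors such as postingDate and relatedPostings behave as expected on the
-- result.
tiedExample :: Transaction
tiedExample = txnTieKnot nulltransaction
  { tdate     = parsedate "2017/03/01"
  , tpostings = [ "expenses:food"   `post` usd 5
                , "assets:checking" `post` usd (-5)
                ]
  }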
postingSetTransaction :: Transaction -> Posting -> Posting postingSetTransaction t p = p{ptransaction=Just t} tests_Hledger_Data_Transaction = TestList $ concat [ tests_postingAsLines, tests_showTransactionUnelided, tests_inference, [ "showTransaction" ~: do assertEqual "show a balanced transaction, eliding last amount" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking" ,"" ]) (let t = Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18], ptransaction=Just t} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.18)], ptransaction=Just t} ] "" in showTransaction t) ,"showTransaction" ~: do assertEqual "show a balanced transaction, no eliding" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.18" ,"" ]) (let t = Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18], ptransaction=Just t} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.18)], ptransaction=Just t} ] "" in showTransactionUnelided t) -- document some cases that arise in debug/testing: ,"showTransaction" ~: do assertEqual "show an unbalanced transaction, should not elide" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.19" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18]} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.19)]} ] "")) ,"showTransaction" ~: do assertEqual "show an unbalanced transaction with one posting, should not elide" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18]} ] "")) ,"showTransaction" ~: do assertEqual "show a transaction with one posting and a missing amount" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=missingmixedamt} ] "")) ,"showTransaction" ~: do assertEqual "show a transaction with a priced commodityless amount" (unlines ["2010/01/01 x" ," a 1 @ $2" ," b" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2010/01/01") Nothing Uncleared "" "x" "" [] [posting{paccount="a", pamount=Mixed [num 1 `at` (usd 2 `withPrecision` 0)]} ,posting{paccount="b", pamount= missingmixedamt} ] "")) ,"balanceTransaction" ~: do assertBool "detect unbalanced entry, sign error" (isLeft $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "test" "" [] [posting{paccount="a", pamount=Mixed [usd 1]} ,posting{paccount="b", pamount=Mixed [usd 1]} ] "")) assertBool "detect unbalanced entry, multiple missing amounts" (isLeft $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "test" "" [] [posting{paccount="a", pamount=missingmixedamt} ,posting{paccount="b", pamount=missingmixedamt} ] "")) let e = balanceTransaction Nothing (Transaction 
0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1]} ,posting{paccount="b", pamount=missingmixedamt} ] "") assertBool "balanceTransaction allows one missing amount" (isRight e) assertEqual "balancing amount is inferred" (Mixed [usd (-1)]) (case e of Right e' -> (pamount $ last $ tpostings e') Left _ -> error' "should not happen") let e = balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1.35]} ,posting{paccount="b", pamount=Mixed [eur (-1)]} ] "") assertBool "balanceTransaction can infer conversion price" (isRight e) assertEqual "balancing conversion price is inferred" (Mixed [usd 1.35 @@ (eur 1 `withPrecision` maxprecision)]) (case e of Right e' -> (pamount $ head $ tpostings e') Left _ -> error' "should not happen") assertBool "balanceTransaction balances based on cost if there are unit prices" (isRight $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1 `at` eur 2]} ,posting{paccount="a", pamount=Mixed [usd (-2) `at` eur 1]} ] "")) assertBool "balanceTransaction balances based on cost if there are total prices" (isRight $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1 @@ eur 1]} ,posting{paccount="a", pamount=Mixed [usd (-2) @@ eur 1]} ] "")) ,"isTransactionBalanced" ~: do let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ] "" assertBool "detect balanced" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.01)], ptransaction=Just t} ] "" assertBool "detect unbalanced" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ] "" assertBool "detect unbalanced, one posting" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 0], ptransaction=Just t} ] "" assertBool "one zero posting is considered balanced for now" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=VirtualPosting, ptransaction=Just t} ] "" assertBool "virtual postings don't need to balance" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=BalancedVirtualPosting, ptransaction=Just t} ] "" assertBool "balanced virtual postings need to balance among themselves" (not $ 
isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=BalancedVirtualPosting, ptransaction=Just t} ,posting{paccount="3", pamount=Mixed [usd (-100)], ptype=BalancedVirtualPosting, ptransaction=Just t} ] "" assertBool "balanced virtual postings need to balance among themselves (2)" (isTransactionBalanced Nothing t) ]] hledger-lib-1.2/Hledger/Data/AutoTransaction.hs0000644000000000000000000001325013066173044017622 0ustar0000000000000000{-# LANGUAGE OverloadedStrings, ViewPatterns #-} {-| This module provides utilities for applying automated transactions like 'ModifierTransaction' and 'PeriodicTransaction'. -} module Hledger.Data.AutoTransaction ( -- * Transaction processors runModifierTransaction , runPeriodicTransaction -- * Accessors , mtvaluequery , jdatespan ) where import Data.Maybe import Data.Monoid ((<>)) import Data.Time.Calendar import qualified Data.Text as T import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Transaction import Hledger.Utils.Parse import Hledger.Utils.UTF8IOCompat (error') import Hledger.Query -- $setup -- >>> :set -XOverloadedStrings -- >>> import Hledger.Data.Posting -- >>> import Hledger.Data.Journal -- | Builds a 'Transaction' transformer based on 'ModifierTransaction'. -- -- 'Query' parameter allows injection of additional restriction on posting -- match. Don't forget to call 'txnTieKnot'. -- -- >>> runModifierTransaction Any (ModifierTransaction "" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- pong $2.00 -- -- -- >>> runModifierTransaction Any (ModifierTransaction "miss" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- -- -- >>> runModifierTransaction None (ModifierTransaction "" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- -- -- >>> runModifierTransaction Any (ModifierTransaction "ping" ["pong" `post` amount{acommodity="*", aquantity=3}]) nulltransaction{tpostings=["ping" `post` usd 2]} -- 0000/01/01 -- ping $2.00 -- pong $6.00 -- -- runModifierTransaction :: Query -> ModifierTransaction -> (Transaction -> Transaction) runModifierTransaction q mt = modifier where q' = simplifyQuery $ And [q, mtvaluequery mt (error "query cannot depend on current time")] mods = map runModifierPosting $ mtpostings mt generatePostings ps = [m p | p <- ps, q' `matchesPosting` p, m <- mods] modifier t@(tpostings -> ps) = t { tpostings = ps ++ generatePostings ps } -- | Extract 'Query' equivalent of 'mtvalueexpr' from 'ModifierTransaction' -- -- >>> mtvaluequery (ModifierTransaction "" []) undefined -- Any -- >>> mtvaluequery (ModifierTransaction "ping" []) undefined -- Acct "ping" -- >>> mtvaluequery (ModifierTransaction "date:2016" []) undefined -- Date (DateSpan 2016) -- >>> mtvaluequery (ModifierTransaction "date:today" []) (read "2017-01-01") -- Date (DateSpan 2017/01/01) mtvaluequery :: ModifierTransaction -> (Day -> Query) mtvaluequery mt = fst . 
flip parseQuery (mtvalueexpr mt) -- | 'DateSpan' of all dates mentioned in 'Journal' -- -- >>> jdatespan nulljournal -- DateSpan - -- >>> jdatespan nulljournal{jtxns=[nulltransaction{tdate=read "2016-01-01"}] } -- DateSpan 2016/01/01 -- >>> jdatespan nulljournal{jtxns=[nulltransaction{tdate=read "2016-01-01", tpostings=[nullposting{pdate=Just $ read "2016-02-01"}]}] } -- DateSpan 2016/01/01-2016/02/01 jdatespan :: Journal -> DateSpan jdatespan j | null dates = nulldatespan | otherwise = DateSpan (Just $ minimum dates) (Just $ 1 `addDays` maximum dates) where dates = concatMap tdates $ jtxns j -- | 'DateSpan' of all dates mentioned in 'Transaction' -- -- >>> tdates nulltransaction -- [0000-01-01] tdates :: Transaction -> [Day] tdates t = tdate t : concatMap pdates (tpostings t) ++ maybeToList (tdate2 t) where pdates p = catMaybes [pdate p, pdate2 p] postingScale :: Posting -> Maybe Quantity postingScale p = case amounts $ pamount p of [a] | acommodity a == "*" -> Just $ aquantity a _ -> Nothing runModifierPosting :: Posting -> (Posting -> Posting) runModifierPosting p' = modifier where modifier p = renderPostingCommentDates $ p' { pdate = pdate p , pdate2 = pdate2 p , pamount = amount' p } amount' = case postingScale p' of Nothing -> const $ pamount p' Just n -> \p -> pamount p `divideMixedAmount` (1/n) renderPostingCommentDates :: Posting -> Posting renderPostingCommentDates p = p { pcomment = comment' } where datesComment = T.concat $ catMaybes [T.pack . showDate <$> pdate p, ("=" <>) . T.pack . showDate <$> pdate2 p] comment' | T.null datesComment = pcomment p | otherwise = T.intercalate "\n" $ filter (not . T.null) [T.strip $ pcomment p, "[" <> datesComment <> "]"] -- | Generate transactions from 'PeriodicTransaction' within a 'DateSpan' -- -- Note that new transactions require 'txnTieKnot' post-processing. -- -- >>> mapM_ (putStr . show) $ runPeriodicTransaction (PeriodicTransaction "monthly from 2017/1 to 2017/4" ["hi" `post` usd 1]) nulldatespan -- 2017/01/01 -- hi $1.00 -- -- 2017/02/01 -- hi $1.00 -- -- 2017/03/01 -- hi $1.00 -- runPeriodicTransaction :: PeriodicTransaction -> (DateSpan -> [Transaction]) runPeriodicTransaction pt = generate where base = nulltransaction { tpostings = ptpostings pt } periodExpr = ptperiodicexpr pt errCurrent = error' $ "Current date cannot be referenced in " ++ show (T.unpack periodExpr) (interval, effectspan) = case parsePeriodExpr errCurrent periodExpr of Left e -> error' $ "Failed to parse " ++ show (T.unpack periodExpr) ++ ": " ++ showDateParseError e Right x -> x generate jspan = [base {tdate=date} | span <- interval `splitSpan` spanIntersect effectspan jspan, let Just date = spanStart span] hledger-lib-1.2/Hledger/Data/Types.hs0000644000000000000000000003607113066173044015616 0ustar0000000000000000{-# LANGUAGE CPP, DeriveDataTypeable, StandaloneDeriving, DeriveGeneric, TypeSynonymInstances, FlexibleInstances #-} {-| Most data types are defined here to avoid import cycles. Here is an overview of the hledger data model: > Journal -- a journal is read from one or more data files. It contains.. > [Transaction] -- journal transactions (aka entries), which have date, cleared status, code, description and.. > [Posting] -- multiple account postings, which have account name and amount > [MarketPrice] -- historical market prices for commodities > > Ledger -- a ledger is derived from a journal, by applying a filter specification and doing some further processing. It contains.. 
> Journal -- a filtered copy of the original journal, containing only the transactions and postings we are interested in > [Account] -- all accounts, in tree order beginning with a "root" account", with their balances and sub/parent accounts For more detailed documentation on each type, see the corresponding modules. -} module Hledger.Data.Types where import GHC.Generics (Generic) import Control.DeepSeq (NFData) import Control.Monad.Except (ExceptT) import Data.Data import Data.Decimal import Data.Default import Text.Blaze (ToMarkup(..)) import qualified Data.Map as M import Data.Text (Text) -- import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import System.Time (ClockTime(..)) import Hledger.Utils.Regex type SmartDate = (String,String,String) data WhichDate = PrimaryDate | SecondaryDate deriving (Eq,Show) data DateSpan = DateSpan (Maybe Day) (Maybe Day) deriving (Eq,Ord,Data,Generic,Typeable) instance NFData DateSpan -- synonyms for various date-related scalars type Year = Integer type Month = Int -- 1-12 type Quarter = Int -- 1-4 type YearWeek = Int -- 1-52 type MonthWeek = Int -- 1-5 type YearDay = Int -- 1-366 type MonthDay = Int -- 1-31 type WeekDay = Int -- 1-7 -- Typical report periods (spans of time), both finite and open-ended. -- A richer abstraction than DateSpan. data Period = DayPeriod Day | WeekPeriod Day | MonthPeriod Year Month | QuarterPeriod Year Quarter | YearPeriod Year | PeriodBetween Day Day | PeriodFrom Day | PeriodTo Day | PeriodAll deriving (Eq,Ord,Show,Data,Generic,Typeable) instance Default Period where def = PeriodAll ---- Typical report period/subperiod durations, from a day to a year. --data Duration = -- DayLong -- WeekLong -- MonthLong -- QuarterLong -- YearLong -- deriving (Eq,Ord,Show,Data,Generic,Typeable) -- Ways in which a period can be divided into subperiods. data Interval = NoInterval | Days Int | Weeks Int | Months Int | Quarters Int | Years Int | DayOfMonth Int | DayOfWeek Int -- WeekOfYear Int -- MonthOfYear Int -- QuarterOfYear Int deriving (Eq,Show,Ord,Data,Generic,Typeable) instance Default Interval where def = NoInterval instance NFData Interval type AccountName = Text data AccountAlias = BasicAlias AccountName AccountName | RegexAlias Regexp Replacement deriving (Eq, Read, Show, Ord, Data, Generic, Typeable) instance NFData AccountAlias data Side = L | R deriving (Eq,Show,Read,Ord,Typeable,Data,Generic) instance NFData Side -- | The basic numeric type used in amounts. type Quantity = Decimal deriving instance Data (Quantity) -- The following is for hledger-web, and requires blaze-markup. -- Doing it here avoids needing a matching flag on the hledger-web package. instance ToMarkup (Quantity) where toMarkup = toMarkup . show -- | An amount's price (none, per unit, or total) in another commodity. -- Note the price should be a positive number, although this is not enforced. data Price = NoPrice | UnitPrice Amount | TotalPrice Amount deriving (Eq,Ord,Typeable,Data,Generic) instance NFData Price -- | Display style for an amount. data AmountStyle = AmountStyle { ascommodityside :: Side, -- ^ does the symbol appear on the left or the right ? ascommodityspaced :: Bool, -- ^ space between symbol and quantity ? asprecision :: !Int, -- ^ number of digits displayed after the decimal point asdecimalpoint :: Maybe Char, -- ^ character used as decimal point: period or comma. 
Nothing means "unspecified, use default" asdigitgroups :: Maybe DigitGroupStyle -- ^ style for displaying digit groups, if any } deriving (Eq,Ord,Read,Show,Typeable,Data,Generic) instance NFData AmountStyle -- | A style for displaying digit groups in the integer part of a -- floating point number. It consists of the character used to -- separate groups (comma or period, whichever is not used as decimal -- point), and the size of each group, starting with the one nearest -- the decimal point. The last group size is assumed to repeat. Eg, -- comma between thousands is DigitGroups ',' [3]. data DigitGroupStyle = DigitGroups Char [Int] deriving (Eq,Ord,Read,Show,Typeable,Data,Generic) instance NFData DigitGroupStyle type CommoditySymbol = Text data Commodity = Commodity { csymbol :: CommoditySymbol, cformat :: Maybe AmountStyle } deriving (Show,Eq,Data,Generic) --,Ord,Typeable,Data,Generic) instance NFData Commodity data Amount = Amount { acommodity :: CommoditySymbol, aquantity :: Quantity, aprice :: Price, -- ^ the (fixed) price for this amount, if any astyle :: AmountStyle } deriving (Eq,Ord,Typeable,Data,Generic) instance NFData Amount newtype MixedAmount = Mixed [Amount] deriving (Eq,Ord,Typeable,Data,Generic) instance NFData MixedAmount data PostingType = RegularPosting | VirtualPosting | BalancedVirtualPosting deriving (Eq,Show,Typeable,Data,Generic) instance NFData PostingType type TagName = Text type TagValue = Text type Tag = (TagName, TagValue) -- ^ A tag name and (possibly empty) value. data ClearedStatus = Uncleared | Pending | Cleared deriving (Eq,Ord,Typeable,Data,Generic) instance NFData ClearedStatus instance Show ClearedStatus where -- custom show.. bad idea.. don't do it.. show Uncleared = "" show Pending = "!" show Cleared = "*" data Posting = Posting { pdate :: Maybe Day, -- ^ this posting's date, if different from the transaction's pdate2 :: Maybe Day, -- ^ this posting's secondary date, if different from the transaction's pstatus :: ClearedStatus, paccount :: AccountName, pamount :: MixedAmount, pcomment :: Text, -- ^ this posting's comment lines, as a single non-indented multi-line string ptype :: PostingType, ptags :: [Tag], -- ^ tag names and values, extracted from the comment pbalanceassertion :: Maybe Amount, -- ^ optional: the expected balance in this commodity in the account after this posting ptransaction :: Maybe Transaction, -- ^ this posting's parent transaction (co-recursive types). -- Tying this knot gets tedious, Maybe makes it easier/optional. porigin :: Maybe Posting -- ^ original posting if this one is result of any transformations (one level only) } deriving (Typeable,Data,Generic) instance NFData Posting -- The equality test for postings ignores the parent transaction's -- identity, to avoid infinite loops. instance Eq Posting where (==) (Posting a1 b1 c1 d1 e1 f1 g1 h1 i1 _ _) (Posting a2 b2 c2 d2 e2 f2 g2 h2 i2 _ _) = a1==a2 && b1==b2 && c1==c2 && d1==d2 && e1==e2 && f1==f2 && g1==g2 && h1==h2 && i1==i2 -- TODO: needs renaming, or removal if no longer needed. See also TextPosition in Hledger.UI.Editor -- | The position of parse errors (eg), like parsec's SourcePos but generic. data GenericSourcePos = GenericSourcePos FilePath Int Int -- ^ name, 1-based line number and 1-based column number. | JournalSourcePos FilePath (Int, Int) -- ^ file name, inclusive range of 1-based line numbers (first, last). 
deriving (Eq, Read, Show, Ord, Data, Generic, Typeable) instance NFData GenericSourcePos data Transaction = Transaction { tindex :: Integer, -- ^ this transaction's 1-based position in the input stream, or 0 when not available tsourcepos :: GenericSourcePos, tdate :: Day, tdate2 :: Maybe Day, tstatus :: ClearedStatus, tcode :: Text, tdescription :: Text, tcomment :: Text, -- ^ this transaction's comment lines, as a single non-indented multi-line string ttags :: [Tag], -- ^ tag names and values, extracted from the comment tpostings :: [Posting], -- ^ this transaction's postings tpreceding_comment_lines :: Text -- ^ any comment lines immediately preceding this transaction } deriving (Eq,Typeable,Data,Generic) instance NFData Transaction data ModifierTransaction = ModifierTransaction { mtvalueexpr :: Text, mtpostings :: [Posting] } deriving (Eq,Typeable,Data,Generic) instance NFData ModifierTransaction data PeriodicTransaction = PeriodicTransaction { ptperiodicexpr :: Text, ptpostings :: [Posting] } deriving (Eq,Typeable,Data,Generic) instance NFData PeriodicTransaction data TimeclockCode = SetBalance | SetRequiredHours | In | Out | FinalOut deriving (Eq,Ord,Typeable,Data,Generic) instance NFData TimeclockCode data TimeclockEntry = TimeclockEntry { tlsourcepos :: GenericSourcePos, tlcode :: TimeclockCode, tldatetime :: LocalTime, tlaccount :: AccountName, tldescription :: Text } deriving (Eq,Ord,Typeable,Data,Generic) instance NFData TimeclockEntry data MarketPrice = MarketPrice { mpdate :: Day, mpcommodity :: CommoditySymbol, mpamount :: Amount } deriving (Eq,Ord,Typeable,Data,Generic) -- & Show (in Amount.hs) instance NFData MarketPrice -- | A Journal, containing transactions and various other things. -- The basic data model for hledger. -- -- This is used during parsing (as the type alias ParsedJournal), and -- then finalised/validated for use as a Journal. Some extra -- parsing-related fields are included for convenience, at least for -- now. In a ParsedJournal these are updated as parsing proceeds, in a -- Journal they represent the final state at end of parsing (used eg -- by the add command). -- data Journal = Journal { -- parsing-related data jparsedefaultyear :: Maybe Year -- ^ the current default year, specified by the most recent Y directive (or current date) ,jparsedefaultcommodity :: Maybe (CommoditySymbol,AmountStyle) -- ^ the current default commodity and its format, specified by the most recent D directive ,jparseparentaccounts :: [AccountName] -- ^ the current stack of parent account names, specified by apply account directives ,jparsealiases :: [AccountAlias] -- ^ the current account name aliases in effect, specified by alias directives (& options ?) 
-- ,jparsetransactioncount :: Integer -- ^ the current count of transactions parsed so far (only journal format txns, currently) ,jparsetimeclockentries :: [TimeclockEntry] -- ^ timeclock sessions which have not been clocked out -- principal data ,jaccounts :: [AccountName] -- ^ accounts that have been declared by account directives ,jcommodities :: M.Map CommoditySymbol Commodity -- ^ commodities and formats declared by commodity directives ,jinferredcommodities :: M.Map CommoditySymbol AmountStyle -- ^ commodities and formats inferred from journal amounts ,jmarketprices :: [MarketPrice] ,jmodifiertxns :: [ModifierTransaction] ,jperiodictxns :: [PeriodicTransaction] ,jtxns :: [Transaction] ,jfinalcommentlines :: Text -- ^ any final trailing comments in the (main) journal file ,jfiles :: [(FilePath, Text)] -- ^ the file path and raw text of the main and -- any included journal files. The main file is first, -- followed by any included files in the order encountered. ,jlastreadtime :: ClockTime -- ^ when this journal was last read from its file(s) } deriving (Eq, Typeable, Data, Generic) deriving instance Data (ClockTime) deriving instance Typeable (ClockTime) deriving instance Generic (ClockTime) instance NFData ClockTime instance NFData Journal -- | A journal in the process of being parsed, not yet finalised. -- The data is partial, and list fields are in reverse order. type ParsedJournal = Journal -- | The id of a data format understood by hledger, eg @journal@ or @csv@. -- The --output-format option selects one of these for output. type StorageFormat = String -- | A hledger journal reader is a triple of storage format name, a -- detector of that format, and a parser from that format to Journal. data Reader = Reader { -- The canonical name of the format handled by this reader rFormat :: StorageFormat -- The file extensions recognised as containing this format ,rExtensions :: [String] -- A text parser for this format, accepting an optional rules file, -- assertion-checking flag, and file path for error messages, -- producing an exception-raising IO action that returns a journal -- or error message. ,rParser :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal -- Experimental readers are never tried automatically. ,rExperimental :: Bool } instance Show Reader where show r = rFormat r ++ " reader" -- | An account, with name, balances and links to parent/subaccounts -- which let you walk up or down the account tree. data Account = Account { aname :: AccountName, -- ^ this account's full name aebalance :: MixedAmount, -- ^ this account's balance, excluding subaccounts asubs :: [Account], -- ^ sub-accounts anumpostings :: Int, -- ^ number of postings to this account -- derived from the above : aibalance :: MixedAmount, -- ^ this account's balance, including subaccounts aparent :: Maybe Account, -- ^ parent account aboring :: Bool -- ^ used in the accounts report to label elidable parents } deriving (Typeable, Data, Generic) -- | A Ledger has the journal it derives from, and the accounts -- derived from that. Accounts are accessible both list-wise and -- tree-wise, since each one knows its parent and subs; the first -- account is the root of the tree and always exists. data Ledger = Ledger { ljournal :: Journal, laccounts :: [Account] } hledger-lib-1.2/Hledger/Query.hs0000644000000000000000000010553513042200120014724 0ustar0000000000000000{-| A general query system for matching things (accounts, postings, transactions..) 
by various criteria, and a parser for query expressions. -} {-# LANGUAGE DeriveDataTypeable, OverloadedStrings, ViewPatterns #-} module Hledger.Query ( -- * Query and QueryOpt Query(..), QueryOpt(..), -- * parsing parseQuery, simplifyQuery, filterQuery, -- * accessors queryIsNull, queryIsAcct, queryIsDepth, queryIsDate, queryIsDate2, queryIsDateOrDate2, queryIsStartDateOnly, queryIsSym, queryIsReal, queryIsStatus, queryIsEmpty, queryStartDate, queryEndDate, queryDateSpan, queryDateSpan', queryDepth, inAccount, inAccountQuery, -- * matching matchesTransaction, matchesPosting, matchesAccount, matchesMixedAmount, matchesAmount, words'', -- * tests tests_Hledger_Query ) where import Data.Data import Data.Either import Data.List import Data.Maybe import Data.Monoid ((<>)) -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe (readDef, headDef) import Test.HUnit import Text.Megaparsec import Text.Megaparsec.Text import Hledger.Utils hiding (words') import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount (amount, nullamt, usd) import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Transaction -- | A query is a composition of search criteria, which can be used to -- match postings, transactions, accounts and more. data Query = Any -- ^ always match | None -- ^ never match | Not Query -- ^ negate this match | Or [Query] -- ^ match if any of these match | And [Query] -- ^ match if all of these match | Code Regexp -- ^ match if code matches this regexp | Desc Regexp -- ^ match if description matches this regexp | Acct Regexp -- ^ match postings whose account matches this regexp | Date DateSpan -- ^ match if primary date in this date span | Date2 DateSpan -- ^ match if secondary date in this date span | Status ClearedStatus -- ^ match txns/postings with this cleared status (Status Uncleared matches all states except cleared) | Real Bool -- ^ match if "realness" (involves a real non-virtual account ?) has this value | Amt OrdPlus Quantity -- ^ match if the amount's numeric quantity is less than/greater than/equal to/unsignedly equal to some value | Sym Regexp -- ^ match if the entire commodity symbol is matched by this regexp | Empty Bool -- ^ if true, show zero-amount postings/accounts which are usually not shown -- more of a query option than a query criteria ? | Depth Int -- ^ match if account depth is less than or equal to this value. 
-- Depth is sometimes used like a query (for filtering report data) -- and sometimes like a query option (for controlling display) | Tag Regexp (Maybe Regexp) -- ^ match if a tag's name, and optionally its value, is matched by these respective regexps -- matching the regexp if provided, exists deriving (Eq,Data,Typeable) -- custom Show implementation to show strings more accurately, eg for debugging regexps instance Show Query where show Any = "Any" show None = "None" show (Not q) = "Not (" ++ show q ++ ")" show (Or qs) = "Or (" ++ show qs ++ ")" show (And qs) = "And (" ++ show qs ++ ")" show (Code r) = "Code " ++ show r show (Desc r) = "Desc " ++ show r show (Acct r) = "Acct " ++ show r show (Date ds) = "Date (" ++ show ds ++ ")" show (Date2 ds) = "Date2 (" ++ show ds ++ ")" show (Status b) = "Status " ++ show b show (Real b) = "Real " ++ show b show (Amt ord qty) = "Amt " ++ show ord ++ " " ++ show qty show (Sym r) = "Sym " ++ show r show (Empty b) = "Empty " ++ show b show (Depth n) = "Depth " ++ show n show (Tag s ms) = "Tag " ++ show s ++ " (" ++ show ms ++ ")" -- | A query option changes a query's/report's behaviour and output in some way. data QueryOpt = QueryOptInAcctOnly AccountName -- ^ show an account register focussed on this account | QueryOptInAcct AccountName -- ^ as above but include sub-accounts in the account register -- | QueryOptCostBasis -- ^ show amounts converted to cost where possible -- | QueryOptDate2 -- ^ show secondary dates instead of primary dates deriving (Show, Eq, Data, Typeable) -- parsing -- -- | A query restricting the account(s) to be shown in the sidebar, if any. -- -- Just looks at the first query option. -- showAccountMatcher :: [QueryOpt] -> Maybe Query -- showAccountMatcher (QueryOptInAcctSubsOnly a:_) = Just $ Acct True $ accountNameToAccountRegex a -- showAccountMatcher _ = Nothing -- | Convert a query expression containing zero or more space-separated -- terms to a query and zero or more query options. A query term is either: -- -- 1. a search pattern, which matches on one or more fields, eg: -- -- acct:REGEXP - match the account name with a regular expression -- desc:REGEXP - match the transaction description -- date:PERIODEXP - match the date with a period expression -- -- The prefix indicates the field to match, or if there is no prefix -- account name is assumed. -- -- 2. a query option, which modifies the reporting behaviour in some -- way. There is currently one of these, which may appear only once: -- -- inacct:FULLACCTNAME -- -- The usual shell quoting rules are assumed. When a pattern contains -- whitespace, it (or the whole term including prefix) should be enclosed -- in single or double quotes. -- -- Period expressions may contain relative dates, so a reference date is -- required to fully parse these. -- -- Multiple terms are combined as follows: -- 1. multiple account patterns are OR'd together -- 2. multiple description patterns are OR'd together -- 3. 
then all terms are AND'd together parseQuery :: Day -> T.Text -> (Query,[QueryOpt]) parseQuery d s = (q, opts) where terms = words'' prefixes s (pats, opts) = partitionEithers $ map (parseQueryTerm d) terms (descpats, pats') = partition queryIsDesc pats (acctpats, otherpats) = partition queryIsAcct pats' q = simplifyQuery $ And $ [Or acctpats, Or descpats] ++ otherpats tests_parseQuery = [ "parseQuery" ~: do let d = nulldate -- parsedate "2011/1/1" parseQuery d "acct:'expenses:autres d\233penses' desc:b" `is` (And [Acct "expenses:autres d\233penses", Desc "b"], []) parseQuery d "inacct:a desc:\"b b\"" `is` (Desc "b b", [QueryOptInAcct "a"]) parseQuery d "inacct:a inacct:b" `is` (Any, [QueryOptInAcct "a", QueryOptInAcct "b"]) parseQuery d "desc:'x x'" `is` (Desc "x x", []) parseQuery d "'a a' 'b" `is` (Or [Acct "a a",Acct "'b"], []) parseQuery d "\"" `is` (Acct "\"", []) ] -- XXX -- | Quote-and-prefix-aware version of words - don't split on spaces which -- are inside quotes, including quotes which may have one of the specified -- prefixes in front, and maybe an additional not: prefix in front of that. words'' :: [T.Text] -> T.Text -> [T.Text] words'' prefixes = fromparse . parsewith maybeprefixedquotedphrases -- XXX where maybeprefixedquotedphrases :: Parser [T.Text] maybeprefixedquotedphrases = choice' [prefixedQuotedPattern, singleQuotedPattern, doubleQuotedPattern, pattern] `sepBy` some spacenonewline prefixedQuotedPattern :: Parser T.Text prefixedQuotedPattern = do not' <- fromMaybe "" `fmap` (optional $ string "not:") let allowednexts | null not' = prefixes | otherwise = prefixes ++ [""] next <- fmap T.pack $ choice' $ map (string . T.unpack) allowednexts let prefix :: T.Text prefix = T.pack not' <> next p <- singleQuotedPattern <|> doubleQuotedPattern return $ prefix <> stripquotes p singleQuotedPattern :: Parser T.Text singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf ("'" :: [Char])) >>= return . stripquotes . T.pack doubleQuotedPattern :: Parser T.Text doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf ("\"" :: [Char])) >>= return . stripquotes . T.pack pattern :: Parser T.Text pattern = fmap T.pack $ many (noneOf (" \n\r" :: [Char])) tests_words'' = [ "words''" ~: do assertEqual "1" ["a","b"] (words'' [] "a b") assertEqual "2" ["a b"] (words'' [] "'a b'") assertEqual "3" ["not:a","b"] (words'' [] "not:a b") assertEqual "4" ["not:a b"] (words'' [] "not:'a b'") assertEqual "5" ["not:a b"] (words'' [] "'not:a b'") assertEqual "6" ["not:desc:a b"] (words'' ["desc:"] "not:desc:'a b'") let s `gives` r = assertEqual "" r (words'' prefixes s) "\"acct:expenses:autres d\233penses\"" `gives` ["acct:expenses:autres d\233penses"] "\"" `gives` ["\""] ] -- XXX -- keep synced with patterns below, excluding "not" prefixes :: [T.Text] prefixes = map (<>":") [ "inacctonly" ,"inacct" ,"amt" ,"code" ,"desc" ,"acct" ,"date" ,"date2" ,"status" ,"cur" ,"real" ,"empty" ,"depth" ,"tag" ] defaultprefix :: T.Text defaultprefix = "acct" -- -- | Parse the query string as a boolean tree of match patterns. -- parseQueryTerm :: String -> Query -- parseQueryTerm s = either (const (Any)) id $ runParser query () "" $ lexmatcher s -- lexmatcher :: String -> [String] -- lexmatcher s = words' s -- query :: GenParser String () Query -- query = undefined -- | Parse a single query term as either a query or a query option, -- or raise an error if it has invalid syntax. 
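--
-- Illustrative examples, in the doctest style used elsewhere in this
-- package (a sketch added for clarity, not from the original docs;
-- nulldate stands in for the reference date, which only matters for
-- date: terms):
--
-- >>> parseQueryTerm nulldate "desc:x"
-- Left (Desc "x")
-- >>> parseQueryTerm nulldate "inacct:assets"
-- Right (QueryOptInAcct "assets")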
parseQueryTerm :: Day -> T.Text -> Either Query QueryOpt parseQueryTerm _ (T.stripPrefix "inacctonly:" -> Just s) = Right $ QueryOptInAcctOnly s parseQueryTerm _ (T.stripPrefix "inacct:" -> Just s) = Right $ QueryOptInAcct s parseQueryTerm d (T.stripPrefix "not:" -> Just s) = case parseQueryTerm d s of Left m -> Left $ Not m Right _ -> Left Any -- not:somequeryoption will be ignored parseQueryTerm _ (T.stripPrefix "code:" -> Just s) = Left $ Code $ T.unpack s parseQueryTerm _ (T.stripPrefix "desc:" -> Just s) = Left $ Desc $ T.unpack s parseQueryTerm _ (T.stripPrefix "acct:" -> Just s) = Left $ Acct $ T.unpack s parseQueryTerm d (T.stripPrefix "date2:" -> Just s) = case parsePeriodExpr d s of Left e -> error' $ "\"date2:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,span) -> Left $ Date2 span parseQueryTerm d (T.stripPrefix "date:" -> Just s) = case parsePeriodExpr d s of Left e -> error' $ "\"date:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,span) -> Left $ Date span parseQueryTerm _ (T.stripPrefix "status:" -> Just s) = case parseStatus s of Left e -> error' $ "\"status:"++T.unpack s++"\" gave a parse error: " ++ e Right st -> Left $ Status st parseQueryTerm _ (T.stripPrefix "real:" -> Just s) = Left $ Real $ parseBool s || T.null s parseQueryTerm _ (T.stripPrefix "amt:" -> Just s) = Left $ Amt ord q where (ord, q) = parseAmountQueryTerm s parseQueryTerm _ (T.stripPrefix "empty:" -> Just s) = Left $ Empty $ parseBool s parseQueryTerm _ (T.stripPrefix "depth:" -> Just s) | n >= 0 = Left $ Depth n | otherwise = error' "depth: should have a positive number" where n = readDef 0 (T.unpack s) parseQueryTerm _ (T.stripPrefix "cur:" -> Just s) = Left $ Sym (T.unpack s) -- support cur: as an alias parseQueryTerm _ (T.stripPrefix "tag:" -> Just s) = Left $ Tag n v where (n,v) = parseTag s parseQueryTerm _ "" = Left $ Any parseQueryTerm d s = parseQueryTerm d $ defaultprefix<>":"<>s tests_parseQueryTerm = [ "parseQueryTerm" ~: do let s `gives` r = parseQueryTerm nulldate s `is` r "a" `gives` (Left $ Acct "a") "acct:expenses:autres d\233penses" `gives` (Left $ Acct "expenses:autres d\233penses") "not:desc:a b" `gives` (Left $ Not $ Desc "a b") "status:1" `gives` (Left $ Status Cleared) "status:*" `gives` (Left $ Status Cleared) "status:!" 
`gives` (Left $ Status Pending) "status:0" `gives` (Left $ Status Uncleared) "status:" `gives` (Left $ Status Uncleared) "real:1" `gives` (Left $ Real True) "date:2008" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2008/01/01") (Just $ parsedate "2009/01/01")) "date:from 2012/5/17" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2012/05/17") Nothing) "inacct:a" `gives` (Right $ QueryOptInAcct "a") "tag:a" `gives` (Left $ Tag "a" Nothing) "tag:a=some value" `gives` (Left $ Tag "a" (Just "some value")) -- "amt:<0" `gives` (Left $ Amt LT 0) -- "amt:=.23" `gives` (Left $ Amt EQ 0.23) -- "amt:>10000.10" `gives` (Left $ Amt GT 10000.1) ] data OrdPlus = Lt | LtEq | Gt | GtEq | Eq | AbsLt | AbsLtEq | AbsGt | AbsGtEq | AbsEq deriving (Show,Eq,Data,Typeable) -- can fail parseAmountQueryTerm :: T.Text -> (OrdPlus, Quantity) parseAmountQueryTerm s' = case s' of -- feel free to do this a smarter way "" -> err (T.stripPrefix "<+" -> Just s) -> (Lt, readDef err (T.unpack s)) (T.stripPrefix "<=+" -> Just s) -> (LtEq, readDef err (T.unpack s)) (T.stripPrefix ">+" -> Just s) -> (Gt, readDef err (T.unpack s)) (T.stripPrefix ">=+" -> Just s) -> (GtEq, readDef err (T.unpack s)) (T.stripPrefix "=+" -> Just s) -> (Eq, readDef err (T.unpack s)) (T.stripPrefix "+" -> Just s) -> (Eq, readDef err (T.unpack s)) (T.stripPrefix "<-" -> Just s) -> (Lt, negate $ readDef err (T.unpack s)) (T.stripPrefix "<=-" -> Just s) -> (LtEq, negate $ readDef err (T.unpack s)) (T.stripPrefix ">-" -> Just s) -> (Gt, negate $ readDef err (T.unpack s)) (T.stripPrefix ">=-" -> Just s) -> (GtEq, negate $ readDef err (T.unpack s)) (T.stripPrefix "=-" -> Just s) -> (Eq, negate $ readDef err (T.unpack s)) (T.stripPrefix "-" -> Just s) -> (Eq, negate $ readDef err (T.unpack s)) (T.stripPrefix "<=" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (LtEq, 0) _ -> (AbsLtEq, n) (T.stripPrefix "<" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (Lt, 0) _ -> (AbsLt, n) (T.stripPrefix ">=" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (GtEq, 0) _ -> (AbsGtEq, n) (T.stripPrefix ">" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (Gt, 0) _ -> (AbsGt, n) (T.stripPrefix "=" -> Just s) -> (AbsEq, readDef err (T.unpack s)) s -> (AbsEq, readDef err (T.unpack s)) where err = error' $ "could not parse as '=', '<', or '>' (optional) followed by a (optionally signed) numeric quantity: " ++ T.unpack s' tests_parseAmountQueryTerm = [ "parseAmountQueryTerm" ~: do let s `gives` r = parseAmountQueryTerm s `is` r "<0" `gives` (Lt,0) -- special case for convenience, since AbsLt 0 would be always false ">0" `gives` (Gt,0) -- special case for convenience and consistency with above ">10000.10" `gives` (AbsGt,10000.1) "=0.23" `gives` (AbsEq,0.23) "0.23" `gives` (AbsEq,0.23) "<=+0.23" `gives` (LtEq,0.23) "-0.23" `gives` (Eq,(-0.23)) ] parseTag :: T.Text -> (Regexp, Maybe Regexp) parseTag s | "=" `T.isInfixOf` s = (T.unpack n, Just $ tail $ T.unpack v) | otherwise = (T.unpack s, Nothing) where (n,v) = T.break (=='=') s -- | Parse the value part of a "status:" query, or return an error. parseStatus :: T.Text -> Either String ClearedStatus parseStatus s | s `elem` ["*","1"] = Right Cleared | s `elem` ["!"] = Right Pending | s `elem` ["","0"] = Right Uncleared | otherwise = Left $ "could not parse "++show s++" as a status (should be *, ! or empty)" -- | Parse the boolean value part of a "status:" query. "1" means true, -- anything else will be parsed as false without error. 
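--
-- For example (an illustrative sketch; only "1" is treated as true):
--
-- >>> parseBool "1"
-- True
-- >>> parseBool "yes"
-- False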
parseBool :: T.Text -> Bool parseBool s = s `elem` truestrings truestrings :: [T.Text] truestrings = ["1"] simplifyQuery :: Query -> Query simplifyQuery q = let q' = simplify q in if q' == q then q else simplifyQuery q' where simplify (And []) = Any simplify (And [q]) = simplify q simplify (And qs) | same qs = simplify $ head qs | any (==None) qs = None | all queryIsDate qs = Date $ spansIntersect $ mapMaybe queryTermDateSpan qs | otherwise = And $ concat $ [map simplify dateqs, map simplify otherqs] where (dateqs, otherqs) = partition queryIsDate $ filter (/=Any) qs simplify (Or []) = Any simplify (Or [q]) = simplifyQuery q simplify (Or qs) | same qs = simplify $ head qs | any (==Any) qs = Any -- all queryIsDate qs = Date $ spansUnion $ mapMaybe queryTermDateSpan qs ? | otherwise = Or $ map simplify $ filter (/=None) qs simplify (Date (DateSpan Nothing Nothing)) = Any simplify (Date2 (DateSpan Nothing Nothing)) = Any simplify q = q tests_simplifyQuery = [ "simplifyQuery" ~: do let q `gives` r = assertEqual "" r (simplifyQuery q) Or [Acct "a"] `gives` Acct "a" Or [Any,None] `gives` Any And [Any,None] `gives` None And [Any,Any] `gives` Any And [Acct "b",Any] `gives` Acct "b" And [Any,And [Date (DateSpan Nothing Nothing)]] `gives` Any And [Date (DateSpan Nothing (Just $ parsedate "2013-01-01")), Date (DateSpan (Just $ parsedate "2012-01-01") Nothing)] `gives` Date (DateSpan (Just $ parsedate "2012-01-01") (Just $ parsedate "2013-01-01")) And [Or [],Or [Desc "b b"]] `gives` Desc "b b" ] same [] = True same (a:as) = all (a==) as -- | Remove query terms (or whole sub-expressions) not matching the given -- predicate from this query. XXX Semantics not completely clear. filterQuery :: (Query -> Bool) -> Query -> Query filterQuery p = simplifyQuery . filterQuery' p filterQuery' :: (Query -> Bool) -> Query -> Query filterQuery' p (And qs) = And $ map (filterQuery p) qs filterQuery' p (Or qs) = Or $ map (filterQuery p) qs -- filterQuery' p (Not q) = Not $ filterQuery p q filterQuery' p q = if p q then q else Any tests_filterQuery = [ "filterQuery" ~: do let (q,p) `gives` r = assertEqual "" r (filterQuery p q) (Any, queryIsDepth) `gives` Any (Depth 1, queryIsDepth) `gives` Depth 1 (And [And [Status Cleared,Depth 1]], not . queryIsDepth) `gives` Status Cleared -- (And [Date nulldatespan, Not (Or [Any, Depth 1])], queryIsDepth) `gives` And [Not (Or [Depth 1])] ] -- * accessors -- | Does this query match everything ? queryIsNull :: Query -> Bool queryIsNull Any = True queryIsNull (And []) = True queryIsNull (Not (Or [])) = True queryIsNull _ = False queryIsDepth :: Query -> Bool queryIsDepth (Depth _) = True queryIsDepth _ = False queryIsDate :: Query -> Bool queryIsDate (Date _) = True queryIsDate _ = False queryIsDate2 :: Query -> Bool queryIsDate2 (Date2 _) = True queryIsDate2 _ = False queryIsDateOrDate2 :: Query -> Bool queryIsDateOrDate2 (Date _) = True queryIsDateOrDate2 (Date2 _) = True queryIsDateOrDate2 _ = False queryIsDesc :: Query -> Bool queryIsDesc (Desc _) = True queryIsDesc _ = False queryIsAcct :: Query -> Bool queryIsAcct (Acct _) = True queryIsAcct _ = False queryIsSym :: Query -> Bool queryIsSym (Sym _) = True queryIsSym _ = False queryIsReal :: Query -> Bool queryIsReal (Real _) = True queryIsReal _ = False queryIsStatus :: Query -> Bool queryIsStatus (Status _) = True queryIsStatus _ = False queryIsEmpty :: Query -> Bool queryIsEmpty (Empty _) = True queryIsEmpty _ = False -- | Does this query specify a start date and nothing else (that would -- filter postings prior to the date) ? 
-- When the flag is true, look for a starting secondary date instead. queryIsStartDateOnly :: Bool -> Query -> Bool queryIsStartDateOnly _ Any = False queryIsStartDateOnly _ None = False queryIsStartDateOnly secondary (Or ms) = and $ map (queryIsStartDateOnly secondary) ms queryIsStartDateOnly secondary (And ms) = and $ map (queryIsStartDateOnly secondary) ms queryIsStartDateOnly False (Date (DateSpan (Just _) _)) = True queryIsStartDateOnly True (Date2 (DateSpan (Just _) _)) = True queryIsStartDateOnly _ _ = False -- | What start date (or secondary date) does this query specify, if any ? -- For OR expressions, use the earliest of the dates. NOT is ignored. queryStartDate :: Bool -> Query -> Maybe Day queryStartDate secondary (Or ms) = earliestMaybeDate $ map (queryStartDate secondary) ms queryStartDate secondary (And ms) = latestMaybeDate $ map (queryStartDate secondary) ms queryStartDate False (Date (DateSpan (Just d) _)) = Just d queryStartDate True (Date2 (DateSpan (Just d) _)) = Just d queryStartDate _ _ = Nothing -- | What end date (or secondary date) does this query specify, if any ? -- For OR expressions, use the latest of the dates. NOT is ignored. queryEndDate :: Bool -> Query -> Maybe Day queryEndDate secondary (Or ms) = latestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate secondary (And ms) = earliestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate False (Date (DateSpan _ (Just d))) = Just d queryEndDate True (Date2 (DateSpan _ (Just d))) = Just d queryEndDate _ _ = Nothing queryTermDateSpan (Date span) = Just span queryTermDateSpan _ = Nothing -- | What date span (or secondary date span) does this query specify ? -- For OR expressions, use the widest possible span. NOT is ignored. queryDateSpan :: Bool -> Query -> DateSpan queryDateSpan secondary q = spansUnion $ queryDateSpans secondary q -- | Extract all date (or secondary date) spans specified in this query. -- NOT is ignored. queryDateSpans :: Bool -> Query -> [DateSpan] queryDateSpans secondary (Or qs) = concatMap (queryDateSpans secondary) qs queryDateSpans secondary (And qs) = concatMap (queryDateSpans secondary) qs queryDateSpans False (Date span) = [span] queryDateSpans True (Date2 span) = [span] queryDateSpans _ _ = [] -- | What date span (or secondary date span) does this query specify ? -- For OR expressions, use the widest possible span. NOT is ignored. queryDateSpan' :: Query -> DateSpan queryDateSpan' q = spansUnion $ queryDateSpans' q -- | Extract all date (or secondary date) spans specified in this query. -- NOT is ignored. queryDateSpans' :: Query -> [DateSpan] queryDateSpans' (Or qs) = concatMap queryDateSpans' qs queryDateSpans' (And qs) = concatMap queryDateSpans' qs queryDateSpans' (Date span) = [span] queryDateSpans' (Date2 span) = [span] queryDateSpans' _ = [] -- | What is the earliest of these dates, where Nothing is latest ? earliestMaybeDate :: [Maybe Day] -> Maybe Day earliestMaybeDate mds = head $ sortBy compareMaybeDates mds ++ [Nothing] -- | What is the latest of these dates, where Nothing is earliest ? latestMaybeDate :: [Maybe Day] -> Maybe Day latestMaybeDate = headDef Nothing . sortBy (flip compareMaybeDates) -- | What is the earliest of these dates, ignoring Nothings ? earliestMaybeDate' :: [Maybe Day] -> Maybe Day earliestMaybeDate' = headDef Nothing . sortBy compareMaybeDates . filter isJust -- | What is the latest of these dates, ignoring Nothings ? latestMaybeDate' :: [Maybe Day] -> Maybe Day latestMaybeDate' = headDef Nothing . sortBy (flip compareMaybeDates) . 
filter isJust -- | Compare two maybe dates, Nothing is earliest. compareMaybeDates :: Maybe Day -> Maybe Day -> Ordering compareMaybeDates Nothing Nothing = EQ compareMaybeDates Nothing (Just _) = LT compareMaybeDates (Just _) Nothing = GT compareMaybeDates (Just a) (Just b) = compare a b -- | The depth limit this query specifies, or a large number if none. queryDepth :: Query -> Int queryDepth q = case queryDepth' q of [] -> 99999 ds -> minimum ds where queryDepth' (Depth d) = [d] queryDepth' (Or qs) = concatMap queryDepth' qs queryDepth' (And qs) = concatMap queryDepth' qs queryDepth' _ = [] -- | The account we are currently focussed on, if any, and whether subaccounts are included. -- Just looks at the first query option. inAccount :: [QueryOpt] -> Maybe (AccountName,Bool) inAccount [] = Nothing inAccount (QueryOptInAcctOnly a:_) = Just (a,False) inAccount (QueryOptInAcct a:_) = Just (a,True) -- | A query for the account(s) we are currently focussed on, if any. -- Just looks at the first query option. inAccountQuery :: [QueryOpt] -> Maybe Query inAccountQuery [] = Nothing inAccountQuery (QueryOptInAcctOnly a : _) = Just $ Acct $ accountNameToAccountOnlyRegex a inAccountQuery (QueryOptInAcct a : _) = Just $ Acct $ accountNameToAccountRegex a -- -- | Convert a query to its inverse. -- negateQuery :: Query -> Query -- negateQuery = Not -- matching -- | Does the match expression match this account ? -- A matching in: clause is also considered a match. matchesAccount :: Query -> AccountName -> Bool matchesAccount (None) _ = False matchesAccount (Not m) a = not $ matchesAccount m a matchesAccount (Or ms) a = any (`matchesAccount` a) ms matchesAccount (And ms) a = all (`matchesAccount` a) ms matchesAccount (Acct r) a = regexMatchesCI r (T.unpack a) -- XXX pack matchesAccount (Depth d) a = accountNameLevel a <= d matchesAccount (Tag _ _) _ = False matchesAccount _ _ = True tests_matchesAccount = [ "matchesAccount" ~: do assertBool "positive acct match" $ matchesAccount (Acct "b:c") "a:bb:c:d" -- assertBool "acct should match at beginning" $ not $ matchesAccount (Acct True "a:b") "c:a:b" let q `matches` a = assertBool "" $ q `matchesAccount` a Depth 2 `matches` "a:b" assertBool "" $ Depth 2 `matchesAccount` "a" assertBool "" $ Depth 2 `matchesAccount` "a:b" assertBool "" $ not $ Depth 2 `matchesAccount` "a:b:c" assertBool "" $ Date nulldatespan `matchesAccount` "a" assertBool "" $ Date2 nulldatespan `matchesAccount` "a" assertBool "" $ not $ (Tag "a" Nothing) `matchesAccount` "a" ] matchesMixedAmount :: Query -> MixedAmount -> Bool matchesMixedAmount q (Mixed []) = q `matchesAmount` nullamt matchesMixedAmount q (Mixed as) = any (q `matchesAmount`) as -- | Does the match expression match this (simple) amount ? matchesAmount :: Query -> Amount -> Bool matchesAmount (Not q) a = not $ q `matchesAmount` a matchesAmount (Any) _ = True matchesAmount (None) _ = False matchesAmount (Or qs) a = any (`matchesAmount` a) qs matchesAmount (And qs) a = all (`matchesAmount` a) qs -- matchesAmount (Amt ord n) a = compareAmount ord n a matchesAmount (Sym r) a = regexMatchesCI ("^" ++ r ++ "$") $ T.unpack $ acommodity a -- matchesAmount _ _ = True -- | Is this simple (single-amount) mixed amount's quantity less than, greater than, equal to, or unsignedly equal to this number ? -- For multi-amount (multiple commodities, or just unsimplified) mixed amounts this is always true. -- | Is this amount's quantity less than, greater than, equal to, or unsignedly equal to this number ? 
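--
-- A couple of illustrative cases (a sketch, not from the original docs;
-- usd builds a dollar Amount with the given quantity):
--
-- >>> compareAmount Lt 5 (usd 3)
-- True
-- >>> compareAmount AbsEq 1 (usd (-1))
-- True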
compareAmount :: OrdPlus -> Quantity -> Amount -> Bool compareAmount ord q Amount{aquantity=aq} = case ord of Lt -> aq < q LtEq -> aq <= q Gt -> aq > q GtEq -> aq >= q Eq -> aq == q AbsLt -> abs aq < abs q AbsLtEq -> abs aq <= abs q AbsGt -> abs aq > abs q AbsGtEq -> abs aq >= abs q AbsEq -> abs aq == abs q -- | Does the match expression match this posting ? -- -- Note that for account match we try both original and effective account matchesPosting :: Query -> Posting -> Bool matchesPosting (Not q) p = not $ q `matchesPosting` p matchesPosting (Any) _ = True matchesPosting (None) _ = False matchesPosting (Or qs) p = any (`matchesPosting` p) qs matchesPosting (And qs) p = all (`matchesPosting` p) qs matchesPosting (Code r) p = regexMatchesCI r $ maybe "" (T.unpack . tcode) $ ptransaction p matchesPosting (Desc r) p = regexMatchesCI r $ maybe "" (T.unpack . tdescription) $ ptransaction p matchesPosting (Acct r) p = matchesPosting p || matchesPosting (originalPosting p) where matchesPosting p = regexMatchesCI r $ T.unpack $ paccount p -- XXX pack matchesPosting (Date span) p = span `spanContainsDate` postingDate p matchesPosting (Date2 span) p = span `spanContainsDate` postingDate2 p matchesPosting (Status Uncleared) p = postingStatus p /= Cleared matchesPosting (Status s) p = postingStatus p == s matchesPosting (Real v) p = v == isReal p matchesPosting q@(Depth _) Posting{paccount=a} = q `matchesAccount` a matchesPosting q@(Amt _ _) Posting{pamount=amt} = q `matchesMixedAmount` amt -- matchesPosting q@(Amt _ _) Posting{pamount=amt} = q `matchesMixedAmount` amt -- matchesPosting (Empty v) Posting{pamount=a} = v == isZeroMixedAmount a -- matchesPosting (Empty False) Posting{pamount=a} = True -- matchesPosting (Empty True) Posting{pamount=a} = isZeroMixedAmount a matchesPosting (Empty _) _ = True matchesPosting (Sym r) Posting{pamount=Mixed as} = any (regexMatchesCI $ "^" ++ r ++ "$") $ map (T.unpack . acommodity) as matchesPosting (Tag n v) p = not $ null $ matchedTags n v $ postingAllTags p -- matchesPosting _ _ = False tests_matchesPosting = [ "matchesPosting" ~: do -- matching posting status.. 
assertBool "positive match on cleared posting status" $ (Status Cleared) `matchesPosting` nullposting{pstatus=Cleared} assertBool "negative match on cleared posting status" $ not $ (Not $ Status Cleared) `matchesPosting` nullposting{pstatus=Cleared} assertBool "positive match on unclered posting status" $ (Status Uncleared) `matchesPosting` nullposting{pstatus=Uncleared} assertBool "negative match on unclered posting status" $ not $ (Not $ Status Uncleared) `matchesPosting` nullposting{pstatus=Uncleared} assertBool "positive match on true posting status acquired from transaction" $ (Status Cleared) `matchesPosting` nullposting{pstatus=Uncleared,ptransaction=Just nulltransaction{tstatus=Cleared}} assertBool "real:1 on real posting" $ (Real True) `matchesPosting` nullposting{ptype=RegularPosting} assertBool "real:1 on virtual posting fails" $ not $ (Real True) `matchesPosting` nullposting{ptype=VirtualPosting} assertBool "real:1 on balanced virtual posting fails" $ not $ (Real True) `matchesPosting` nullposting{ptype=BalancedVirtualPosting} assertBool "a" $ (Acct "'b") `matchesPosting` nullposting{paccount="'b"} assertBool "b" $ not $ (Tag "a" (Just "r$")) `matchesPosting` nullposting assertBool "c" $ (Tag "foo" Nothing) `matchesPosting` nullposting{ptags=[("foo","")]} assertBool "d" $ (Tag "foo" Nothing) `matchesPosting` nullposting{ptags=[("foo","baz")]} assertBool "e" $ (Tag "foo" (Just "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "f" $ not $ (Tag "foo" (Just "a$")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "g" $ not $ (Tag " foo " (Just "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "h" $ not $ (Tag "foo foo" (Just " ar ba ")) `matchesPosting` nullposting{ptags=[("foo foo","bar bar")]} -- a tag match on a posting also sees inherited tags assertBool "i" $ (Tag "txntag" Nothing) `matchesPosting` nullposting{ptransaction=Just nulltransaction{ttags=[("txntag","")]}} assertBool "j" $ not $ (Sym "$") `matchesPosting` nullposting{pamount=Mixed [usd 1]} -- becomes "^$$", ie testing for null symbol assertBool "k" $ (Sym "\\$") `matchesPosting` nullposting{pamount=Mixed [usd 1]} -- have to quote $ for regexpr assertBool "l" $ (Sym "shekels") `matchesPosting` nullposting{pamount=Mixed [amount{acommodity="shekels"}]} assertBool "m" $ not $ (Sym "shek") `matchesPosting` nullposting{pamount=Mixed [amount{acommodity="shekels"}]} ] -- | Does the match expression match this transaction ? 
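--
-- For example (an illustrative sketch, mirroring the matchesPosting
-- examples above):
--
-- >>> Desc "food" `matchesTransaction` nulltransaction{tdescription="food shopping"}
-- True
-- >>> Status Cleared `matchesTransaction` nulltransaction{tstatus=Cleared}
-- True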
matchesTransaction :: Query -> Transaction -> Bool matchesTransaction (Not q) t = not $ q `matchesTransaction` t matchesTransaction (Any) _ = True matchesTransaction (None) _ = False matchesTransaction (Or qs) t = any (`matchesTransaction` t) qs matchesTransaction (And qs) t = all (`matchesTransaction` t) qs matchesTransaction (Code r) t = regexMatchesCI r $ T.unpack $ tcode t matchesTransaction (Desc r) t = regexMatchesCI r $ T.unpack $ tdescription t matchesTransaction q@(Acct _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Date span) t = spanContainsDate span $ tdate t matchesTransaction (Date2 span) t = spanContainsDate span $ transactionDate2 t matchesTransaction (Status Uncleared) t = tstatus t /= Cleared matchesTransaction (Status s) t = tstatus t == s matchesTransaction (Real v) t = v == hasRealPostings t matchesTransaction q@(Amt _ _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Empty _) _ = True matchesTransaction (Depth d) t = any (Depth d `matchesPosting`) $ tpostings t matchesTransaction q@(Sym _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Tag n v) t = not $ null $ matchedTags n v $ transactionAllTags t -- matchesTransaction _ _ = False tests_matchesTransaction = [ "matchesTransaction" ~: do let q `matches` t = assertBool "" $ q `matchesTransaction` t Any `matches` nulltransaction assertBool "" $ not $ (Desc "x x") `matchesTransaction` nulltransaction{tdescription="x"} assertBool "" $ (Desc "x x") `matchesTransaction` nulltransaction{tdescription="x x"} -- see posting for more tag tests assertBool "" $ (Tag "foo" (Just "a")) `matchesTransaction` nulltransaction{ttags=[("foo","bar")]} -- a tag match on a transaction also matches posting tags assertBool "" $ (Tag "postingtag" Nothing) `matchesTransaction` nulltransaction{tpostings=[nullposting{ptags=[("postingtag","")]}]} ] -- | Filter a list of tags by matching against their names and -- optionally also their values. matchedTags :: Regexp -> Maybe Regexp -> [Tag] -> [Tag] matchedTags namepat valuepat tags = filter (match namepat valuepat) tags where match npat Nothing (n,_) = regexMatchesCI npat (T.unpack n) -- XXX match npat (Just vpat) (n,v) = regexMatchesCI npat (T.unpack n) && regexMatchesCI vpat (T.unpack v) -- tests tests_Hledger_Query :: Test tests_Hledger_Query = TestList $ tests_simplifyQuery ++ tests_words'' ++ tests_filterQuery ++ tests_parseQueryTerm ++ tests_parseAmountQueryTerm ++ tests_parseQuery ++ tests_matchesAccount ++ tests_matchesPosting ++ tests_matchesTransaction hledger-lib-1.2/Hledger/Read.hs0000644000000000000000000002615213067102102014477 0ustar0000000000000000{-| This is the entry point to hledger's reading system, which can read Journals from various data formats. Use this module if you want to parse journal data or read journal files. Generally it should not be necessary to import modules below this one. 
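A minimal usage sketch (illustrative only; the file path, and the choice of
no forced format, no CSV rules file and assertion checking enabled, are
assumptions rather than part of the original documentation):

> import Hledger.Read (readJournalFile)
> import Hledger.Data.Types (Journal(..))
>
> main :: IO ()
> main = do
>   -- Nothing: no forced data format; Nothing: no CSV rules file;
>   -- True: check balance assertions; then a (hypothetical) journal path.
>   ej <- readJournalFile Nothing Nothing True "examples/sample.journal"
>   case ej of
>     Left err -> putStrLn err
>     Right j  -> print (length (jtxns j))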
-} {-# LANGUAGE ScopedTypeVariables, OverloadedStrings #-} module Hledger.Read ( -- * Journal files PrefixedFilePath, defaultJournal, defaultJournalPath, readJournalFiles, readJournalFile, requireJournalFileExists, ensureJournalFileExists, splitReaderPrefix, -- * Journal parsing readJournal, readJournal', -- * Re-exported JournalReader.accountaliasp, JournalReader.postingp, module Hledger.Read.Common, -- * Tests samplejournal, tests_Hledger_Read, ) where import Control.Applicative ((<|>)) import Control.Arrow (right) import qualified Control.Exception as C import Control.Monad.Except import Data.List import Data.Maybe import Data.Text (Text) import qualified Data.Text as T import Safe import System.Directory (doesFileExist, getHomeDirectory) import System.Environment (getEnv) import System.Exit (exitFailure) import System.FilePath ((), takeExtension) import System.IO (stderr) import Test.HUnit import Text.Printf import Hledger.Data.Dates (getCurrentDay) import Hledger.Data.Types import Hledger.Read.Common import qualified Hledger.Read.JournalReader as JournalReader -- import qualified Hledger.Read.LedgerReader as LedgerReader import qualified Hledger.Read.TimedotReader as TimedotReader import qualified Hledger.Read.TimeclockReader as TimeclockReader import qualified Hledger.Read.CsvReader as CsvReader import Hledger.Utils import Prelude hiding (getContents, writeFile) import Hledger.Utils.UTF8IOCompat (writeFile) journalEnvVar = "LEDGER_FILE" journalEnvVar2 = "LEDGER" journalDefaultFilename = ".hledger.journal" -- The available journal readers, each one handling a particular data format. readers :: [Reader] readers = [ JournalReader.reader ,TimeclockReader.reader ,TimedotReader.reader ,CsvReader.reader -- ,LedgerReader.reader ] readerNames :: [String] readerNames = map rFormat readers -- | A file path optionally prefixed by a reader name and colon -- (journal:, csv:, timedot:, etc.). type PrefixedFilePath = FilePath -- | Read the default journal file specified by the environment, or raise an error. defaultJournal :: IO Journal defaultJournal = defaultJournalPath >>= readJournalFile Nothing Nothing True >>= either error' return -- | Get the default journal file path specified by the environment. -- Like ledger, we look first for the LEDGER_FILE environment -- variable, and if that does not exist, for the legacy LEDGER -- environment variable. If neither is set, or the value is blank, -- return the hard-coded default, which is @.hledger.journal@ in the -- users's home directory (or in the current directory, if we cannot -- determine a home directory). defaultJournalPath :: IO String defaultJournalPath = do s <- envJournalPath if null s then defaultJournalPath else return s where envJournalPath = getEnv journalEnvVar `C.catch` (\(_::C.IOException) -> getEnv journalEnvVar2 `C.catch` (\(_::C.IOException) -> return "")) defaultJournalPath = do home <- getHomeDirectory `C.catch` (\(_::C.IOException) -> return "") return $ home journalDefaultFilename -- | @readJournalFiles mformat mrulesfile assrt prefixedfiles@ -- -- Read a Journal from each specified file path and combine them into one. -- Or, return the first error message. -- -- Combining Journals means concatenating them, basically. -- The parse state resets at the start of each file, which means that -- directives & aliases do not cross file boundaries. -- (The final parse state saved in the Journal does span all files, however.) 
-- -- As with readJournalFile, -- file paths can optionally have a READER: prefix, -- and the @mformat@, @mrulesfile, and @assrt@ arguments are supported -- (and these are applied to all files). -- readJournalFiles :: Maybe StorageFormat -> Maybe FilePath -> Bool -> [PrefixedFilePath] -> IO (Either String Journal) readJournalFiles mformat mrulesfile assrt prefixedfiles = do (right mconcat1 . sequence) <$> mapM (readJournalFile mformat mrulesfile assrt) prefixedfiles where mconcat1 :: Monoid t => [t] -> t mconcat1 [] = mempty mconcat1 x = foldr1 mappend x -- | @readJournalFile mformat mrulesfile assrt prefixedfile@ -- -- Read a Journal from this file, or from stdin if the file path is -, -- or return an error message. The file path can have a READER: prefix. -- -- The reader (data format) is chosen based on (in priority order): -- the @mformat@ argument; -- the file path's READER: prefix, if any; -- a recognised file name extension (in readJournal); -- if none of these identify a known reader, all built-in readers are tried in turn. -- -- A CSV conversion rules file (@mrulesfiles@) can be specified to help convert CSV data. -- -- Optionally, any balance assertions in the journal can be checked (@assrt@). -- readJournalFile :: Maybe StorageFormat -> Maybe FilePath -> Bool -> PrefixedFilePath -> IO (Either String Journal) readJournalFile mformat mrulesfile assrt prefixedfile = do let (mprefixformat, f) = splitReaderPrefix prefixedfile mfmt = mformat <|> mprefixformat requireJournalFileExists f readFileOrStdinAnyLineEnding f >>= readJournal mfmt mrulesfile assrt (Just f) -- | If a filepath is prefixed by one of the reader names and a colon, -- split that off. Eg "csv:-" -> (Just "csv", "-"). splitReaderPrefix :: PrefixedFilePath -> (Maybe String, FilePath) splitReaderPrefix f = headDef (Nothing, f) [(Just r, drop (length r + 1) f) | r <- readerNames, (r++":") `isPrefixOf` f] -- | If the specified journal file does not exist (and is not "-"), -- give a helpful error and quit. requireJournalFileExists :: FilePath -> IO () requireJournalFileExists "-" = return () requireJournalFileExists f = do exists <- doesFileExist f when (not exists) $ do -- XXX might not be a journal file hPrintf stderr "The hledger journal file \"%s\" was not found.\n" f hPrintf stderr "Please create it first, eg with \"hledger add\" or a text editor.\n" hPrintf stderr "Or, specify an existing journal file with -f or LEDGER_FILE.\n" exitFailure -- | Ensure there is a journal file at the given path, creating an empty one if needed. ensureJournalFileExists :: FilePath -> IO () ensureJournalFileExists f = do exists <- doesFileExist f when (not exists) $ do hPrintf stderr "Creating hledger journal file %s.\n" f -- note Hledger.Utils.UTF8.* do no line ending conversion on windows, -- we currently require unix line endings on all platforms. newJournalContent >>= writeFile f -- | Give the content for a new auto-created journal file. newJournalContent :: IO String newJournalContent = do d <- getCurrentDay return $ printf "; journal created %s by hledger\n" (show d) -- | Read a Journal from the given text trying all readers in turn, or throw an error. readJournal' :: Text -> IO Journal readJournal' t = readJournal Nothing Nothing True Nothing t >>= either error' return tests_readJournal' = [ "readJournal' parses sample journal" ~: do _ <- samplejournal assertBool "" True ] -- | @readJournal mformat mrulesfile assrt mfile txt@ -- -- Read a Journal from some text, or return an error message. 
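--
-- An illustrative call (a sketch, assuming OverloadedStrings for the Text
-- literal), parsing journal-format text directly with no format hint,
-- no CSV rules file, assertion checking enabled, and no file path:
--
-- > readJournal Nothing Nothing True Nothing "2017/1/1 x\n  a  1\n  b\n"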
-- -- The reader (data format) is chosen based on (in priority order): -- the @mformat@ argument; -- a recognised file name extension in @mfile@ (if provided). -- If none of these identify a known reader, all built-in readers are tried in turn -- (returning the first one's error message if none of them succeed). -- -- A CSV conversion rules file (@mrulesfiles@) can be specified to help convert CSV data. -- -- Optionally, any balance assertions in the journal can be checked (@assrt@). -- readJournal :: Maybe StorageFormat -> Maybe FilePath -> Bool -> Maybe FilePath -> Text -> IO (Either String Journal) readJournal mformat mrulesfile assrt mfile txt = let stablereaders = filter (not.rExperimental) readers rs = maybe stablereaders (:[]) $ findReader mformat mfile in tryReaders rs mrulesfile assrt mfile txt -- | @findReader mformat mpath@ -- -- Find the reader named by @mformat@, if provided. -- Or, if a file path is provided, find the first reader that handles -- its file extension, if any. findReader :: Maybe StorageFormat -> Maybe FilePath -> Maybe Reader findReader Nothing Nothing = Nothing findReader (Just fmt) _ = headMay [r | r <- readers, rFormat r == fmt] findReader Nothing (Just path) = case prefix of Just fmt -> headMay [r | r <- readers, rFormat r == fmt] Nothing -> headMay [r | r <- readers, ext `elem` rExtensions r] where (prefix,path') = splitReaderPrefix path ext = drop 1 $ takeExtension path' -- | @tryReaders readers mrulesfile assrt path t@ -- -- Try to parse the given text to a Journal using each reader in turn, -- returning the first success, or if all of them fail, the first error message. tryReaders :: [Reader] -> Maybe FilePath -> Bool -> Maybe FilePath -> Text -> IO (Either String Journal) tryReaders readers mrulesfile assrt path t = firstSuccessOrFirstError [] readers where firstSuccessOrFirstError :: [String] -> [Reader] -> IO (Either String Journal) firstSuccessOrFirstError [] [] = return $ Left "no readers found" firstSuccessOrFirstError errs (r:rs) = do dbg1IO "trying reader" (rFormat r) result <- (runExceptT . (rParser r) mrulesfile assrt path') t dbg1IO "reader result" $ either id show result case result of Right j -> return $ Right j -- success! Left e -> firstSuccessOrFirstError (errs++[e]) rs -- keep trying firstSuccessOrFirstError (e:_) [] = return $ Left e -- none left, return first error path' = fromMaybe "(string)" path -- tests samplejournal = readJournal' $ T.unlines ["2008/01/01 income" ," assets:bank:checking $1" ," income:salary" ,"" ,"comment" ,"multi line comment here" ,"for testing purposes" ,"end comment" ,"" ,"2008/06/01 gift" ," assets:bank:checking $1" ," income:gifts" ,"" ,"2008/06/02 save" ," assets:bank:saving $1" ," assets:bank:checking" ,"" ,"2008/06/03 * eat & shop" ," expenses:food $1" ," expenses:supplies $1" ," assets:cash" ,"" ,"2008/12/31 * pay off" ," liabilities:debts $1" ," assets:bank:checking" ] tests_Hledger_Read = TestList $ tests_readJournal' ++ [ JournalReader.tests_Hledger_Read_JournalReader, -- LedgerReader.tests_Hledger_Read_LedgerReader, TimeclockReader.tests_Hledger_Read_TimeclockReader, TimedotReader.tests_Hledger_Read_TimedotReader, CsvReader.tests_Hledger_Read_CsvReader, "journal" ~: do r <- runExceptT $ parseWithState mempty JournalReader.journalp "" assertBool "journalp should parse an empty file" (isRight $ r) jE <- readJournal Nothing Nothing True Nothing "" -- don't know how to get it from journal either error' (assertBool "journalp parsing an empty file should give an empty journal" . null . 
jtxns) jE ] hledger-lib-1.2/Hledger/Read/Common.hs0000644000000000000000000010016513066173044015740 0ustar0000000000000000--- * doc -- Lines beginning "--- *" are collapsible orgstruct nodes. Emacs users, -- (add-hook 'haskell-mode-hook -- (lambda () (set-variable 'orgstruct-heading-prefix-regexp "--- " t)) -- 'orgstruct-mode) -- and press TAB on nodes to expand/collapse. {-| Some common parsers and helpers used by several readers. Some of these might belong in Hledger.Read.JournalReader or Hledger.Read. -} --- * module {-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-} module Hledger.Read.Common where --- * imports import Prelude () import Prelude.Compat hiding (readFile) import Control.Monad.Compat import Control.Monad.Except (ExceptT(..), runExceptT, throwError) --, catchError) import Control.Monad.State.Strict import Data.Char (isNumber) import Data.Functor.Identity import Data.List.Compat import Data.List.NonEmpty (NonEmpty(..)) import Data.List.Split (wordsBy) import Data.Maybe import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import Safe import System.Time (getClockTime) import Text.Megaparsec hiding (parse,State) import Text.Megaparsec.Text import Hledger.Data import Hledger.Utils -- $setup --- * parsing utils -- | Run a string parser with no state in the identity monad. runTextParser, rtp :: TextParser Identity a -> Text -> Either (ParseError Char Dec) a runTextParser p t = runParser p "" t rtp = runTextParser -- | Run a journal parser with a null journal-parsing state. runJournalParser, rjp :: Monad m => TextParser m a -> Text -> m (Either (ParseError Char Dec) a) runJournalParser p t = runParserT p "" t rjp = runJournalParser -- | Run an error-raising journal parser with a null journal-parsing state. runErroringJournalParser, rejp :: Monad m => ErroringJournalParser m a -> Text -> m (Either String a) runErroringJournalParser p t = runExceptT $ runJournalParser (evalStateT p mempty) t >>= either (throwError . parseErrorPretty) return rejp = runErroringJournalParser genericSourcePos :: SourcePos -> GenericSourcePos genericSourcePos p = GenericSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p) (fromIntegral . unPos $ sourceColumn p) journalSourcePos :: SourcePos -> SourcePos -> GenericSourcePos journalSourcePos p p' = JournalSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p, fromIntegral $ line') where line' | (unPos $ sourceColumn p') == 1 = unPos (sourceLine p') - 1 | otherwise = unPos $ sourceLine p' -- might be at end of file withat last new-line -- | Given a megaparsec ParsedJournal parser, balance assertion flag, file -- path and file content: parse and post-process a Journal, or give an error. 
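--
-- A sketch of how a reader's rParser might be built from this
-- (hypothetical parser name @myjournalp@; the CSV rules file argument,
-- presumably unused by non-CSV readers, is ignored here):
--
-- > myReaderParser :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal
-- > myReaderParser _mrulesfile assrt f txt = parseAndFinaliseJournal myjournalp assrt f txt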
parseAndFinaliseJournal :: ErroringJournalParser IO ParsedJournal -> Bool -> FilePath -> Text -> ExceptT String IO Journal parseAndFinaliseJournal parser assrt f txt = do t <- liftIO getClockTime y <- liftIO getCurrentYear ep <- runParserT (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt case ep of Right pj -> case journalFinalise t f txt assrt pj of Right j -> return j Left e -> throwError e Left e -> throwError $ parseErrorPretty e parseAndFinaliseJournal' :: JournalParser ParsedJournal -> Bool -> FilePath -> Text -> ExceptT String IO Journal parseAndFinaliseJournal' parser assrt f txt = do t <- liftIO getClockTime y <- liftIO getCurrentYear let ep = runParser (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt case ep of Right pj -> case journalFinalise t f txt assrt pj of Right j -> return j Left e -> throwError e Left e -> throwError $ parseErrorPretty e setYear :: Year -> JournalStateParser m () setYear y = modify' (\j -> j{jparsedefaultyear=Just y}) getYear :: JournalStateParser m (Maybe Year) getYear = fmap jparsedefaultyear get setDefaultCommodityAndStyle :: (CommoditySymbol,AmountStyle) -> JournalStateParser m () setDefaultCommodityAndStyle cs = modify' (\j -> j{jparsedefaultcommodity=Just cs}) getDefaultCommodityAndStyle :: JournalStateParser m (Maybe (CommoditySymbol,AmountStyle)) getDefaultCommodityAndStyle = jparsedefaultcommodity `fmap` get pushAccount :: AccountName -> JournalStateParser m () pushAccount acct = modify' (\j -> j{jaccounts = acct : jaccounts j}) pushParentAccount :: AccountName -> JournalStateParser m () pushParentAccount acct = modify' (\j -> j{jparseparentaccounts = acct : jparseparentaccounts j}) popParentAccount :: JournalStateParser m () popParentAccount = do j <- get case jparseparentaccounts j of [] -> unexpected (Tokens ('E' :| "nd of apply account block with no beginning")) (_:rest) -> put j{jparseparentaccounts=rest} getParentAccount :: JournalStateParser m AccountName getParentAccount = fmap (concatAccountNames . reverse . jparseparentaccounts) get addAccountAlias :: MonadState Journal m => AccountAlias -> m () addAccountAlias a = modify' (\(j@Journal{..}) -> j{jparsealiases=a:jparsealiases}) getAccountAliases :: MonadState Journal m => m [AccountAlias] getAccountAliases = fmap jparsealiases get clearAccountAliases :: MonadState Journal m => m () clearAccountAliases = modify' (\(j@Journal{..}) -> j{jparsealiases=[]}) -- getTransactionCount :: MonadState Journal m => m Integer -- getTransactionCount = fmap jparsetransactioncount get -- -- setTransactionCount :: MonadState Journal m => Integer -> m () -- setTransactionCount i = modify' (\j -> j{jparsetransactioncount=i}) -- -- -- | Increment the transaction index by one and return the new value. -- incrementTransactionCount :: MonadState Journal m => m Integer -- incrementTransactionCount = do -- modify' (\j -> j{jparsetransactioncount=jparsetransactioncount j + 1}) -- getTransactionCount journalAddFile :: (FilePath,Text) -> Journal -> Journal journalAddFile f j@Journal{jfiles=fs} = j{jfiles=fs++[f]} -- append, unlike the other fields, even though we do a final reverse, -- to compensate for additional reversal due to including/monoid-concatting -- -- | Terminate parsing entirely, returning the given error message -- -- with the current parse position prepended. 
-- parserError :: String -> ErroringJournalParser a -- parserError s = do -- pos <- getPosition -- parserErrorAt pos s -- | Terminate parsing entirely, returning the given error message -- with the given parse position prepended. parserErrorAt :: Monad m => SourcePos -> String -> ErroringJournalParser m a parserErrorAt pos s = throwError $ sourcePosPretty pos ++ ":\n" ++ s --- * parsers --- ** transaction bits statusp :: TextParser m ClearedStatus statusp = choice' [ many spacenonewline >> char '*' >> return Cleared , many spacenonewline >> char '!' >> return Pending , return Uncleared ] "cleared status" codep :: TextParser m String codep = try (do { some spacenonewline; char '(' "codep"; anyChar `manyTill` char ')' } ) <|> return "" descriptionp :: JournalStateParser m String descriptionp = many (noneOf (";\n" :: [Char])) --- ** dates -- | Parse a date in YYYY/MM/DD format. -- Hyphen (-) and period (.) are also allowed as separators. -- The year may be omitted if a default year has been set. -- Leading zeroes may be omitted. datep :: JournalStateParser m Day datep = do -- hacky: try to ensure precise errors for invalid dates -- XXX reported error position is not too good -- pos <- genericSourcePos <$> getPosition datestr <- do c <- digitChar cs <- lift $ many $ choice' [digitChar, datesepchar] return $ c:cs let sepchars = nub $ sort $ filter (`elem` datesepchars) datestr when (length sepchars /= 1) $ fail $ "bad date, different separators used: " ++ datestr let dateparts = wordsBy (`elem` datesepchars) datestr currentyear <- getYear [y,m,d] <- case (dateparts,currentyear) of ([m,d],Just y) -> return [show y,m,d] ([_,_],Nothing) -> fail $ "partial date "++datestr++" found, but the current year is unknown" ([y,m,d],_) -> return [y,m,d] _ -> fail $ "bad date: " ++ datestr let maybedate = fromGregorianValid (read y) (read m) (read d) case maybedate of Nothing -> fail $ "bad date: " ++ datestr Just date -> return date "full or partial date" -- | Parse a date and time in YYYY/MM/DD HH:MM[:SS][+-ZZZZ] format. -- Hyphen (-) and period (.) are also allowed as date separators. -- The year may be omitted if a default year has been set. -- Seconds are optional. -- The timezone is optional and ignored (the time is always interpreted as a local time). -- Leading zeroes may be omitted (except in a timezone). datetimep :: JournalStateParser m LocalTime datetimep = do day <- datep lift $ some spacenonewline h <- some digitChar let h' = read h guard $ h' >= 0 && h' <= 23 char ':' m <- some digitChar let m' = read m guard $ m' >= 0 && m' <= 59 s <- optional $ char ':' >> some digitChar let s' = case s of Just sstr -> read sstr Nothing -> 0 guard $ s' >= 0 && s' <= 59 {- tz <- -} optional $ do plusminus <- oneOf ("-+" :: [Char]) d1 <- digitChar d2 <- digitChar d3 <- digitChar d4 <- digitChar return $ plusminus:d1:d2:d3:d4:"" -- ltz <- liftIO $ getCurrentTimeZone -- let tz' = maybe ltz (fromMaybe ltz . 
parseTime defaultTimeLocale "%z") tz -- return $ localTimeToUTC tz' $ LocalTime day $ TimeOfDay h' m' (fromIntegral s') return $ LocalTime day $ TimeOfDay h' m' (fromIntegral s') secondarydatep :: Day -> JournalStateParser m Day secondarydatep primarydate = do char '=' -- kludgy way to use primary date for default year let withDefaultYear d p = do y <- getYear let (y',_,_) = toGregorian d in setYear y' r <- p when (isJust y) $ setYear $ fromJust y -- XXX -- mapM setYear <$> y return r withDefaultYear primarydate datep -- | -- >> parsewith twoorthreepartdatestringp "2016/01/2" -- Right "2016/01/2" -- twoorthreepartdatestringp = do -- n1 <- some digitChar -- c <- datesepchar -- n2 <- some digitChar -- mn3 <- optional $ char c >> some digitChar -- return $ n1 ++ c:n2 ++ maybe "" (c:) mn3 --- ** account names -- | Parse an account name, then apply any parent account prefix and/or account aliases currently in effect. modifiedaccountnamep :: JournalStateParser m AccountName modifiedaccountnamep = do parent <- getParentAccount aliases <- getAccountAliases a <- lift accountnamep return $ accountNameApplyAliases aliases $ -- XXX accountNameApplyAliasesMemo ? doesn't seem to make a difference joinAccountNames parent a -- | Parse an account name. Account names start with a non-space, may -- have single spaces inside them, and are terminated by two or more -- spaces (or end of input). Also they have one or more components of -- at least one character, separated by the account separator char. -- (This parser will also consume one following space, if present.) accountnamep :: TextParser m AccountName accountnamep = do astr <- do c <- nonspace cs <- striptrailingspace <$> many (nonspace <|> singlespace) return $ c:cs let a = T.pack astr when (accountNameFromComponents (accountNameComponents a) /= a) (fail $ "account name seems ill-formed: "++astr) return a where singlespace = try (do {spacenonewline; do {notFollowedBy spacenonewline; return ' '}}) striptrailingspace "" = "" striptrailingspace s = if last s == ' ' then init s else s -- accountnamechar = notFollowedBy (oneOf "()[]") >> nonspace -- "account name character (non-bracket, non-parenthesis, non-whitespace)" --- ** amounts -- | Parse whitespace then an amount, with an optional left or right -- currency symbol and optional price, or return the special -- "missing" marker amount. spaceandamountormissingp :: Monad m => JournalStateParser m MixedAmount spaceandamountormissingp = try (do lift $ some spacenonewline (Mixed . (:[])) `fmap` amountp <|> return missingmixedamt ) <|> return missingmixedamt #ifdef TESTS assertParseEqual' :: (Show a, Eq a) => (Either ParseError a) -> a -> Assertion assertParseEqual' parse expected = either (assertFailure.show) (`is'` expected) parse is' :: (Eq a, Show a) => a -> a -> Assertion a `is'` e = assertEqual e a test_spaceandamountormissingp = do assertParseEqual' (parseWithState mempty spaceandamountormissingp " $47.18") (Mixed [usd 47.18]) assertParseEqual' (parseWithState mempty spaceandamountormissingp "$47.18") missingmixedamt assertParseEqual' (parseWithState mempty spaceandamountormissingp " ") missingmixedamt assertParseEqual' (parseWithState mempty spaceandamountormissingp "") missingmixedamt #endif -- | Parse a single-commodity amount, with optional symbol on the left or -- right, optional unit or total price, and optional (ignored) -- ledger-style balance assertion or fixed lot price declaration. 
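--
-- Some example inputs it accepts (per the tests below): "$47.18",
-- "$1.", "$10 @ €0.5" (unit price), "$10 @@ €5" (total price).
-- For quick experimentation, the pure helper amountp' defined further
-- below parses such strings directly, eg:
--
-- > amountp' "$47.18"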
amountp :: Monad m => JournalStateParser m Amount amountp = try leftsymbolamountp <|> try rightsymbolamountp <|> nosymbolamountp #ifdef TESTS test_amountp = do assertParseEqual' (parseWithState mempty amountp "$47.18") (usd 47.18) assertParseEqual' (parseWithState mempty amountp "$1.") (usd 1 `withPrecision` 0) -- ,"amount with unit price" ~: do assertParseEqual' (parseWithState mempty amountp "$10 @ €0.5") (usd 10 `withPrecision` 0 `at` (eur 0.5 `withPrecision` 1)) -- ,"amount with total price" ~: do assertParseEqual' (parseWithState mempty amountp "$10 @@ €5") (usd 10 `withPrecision` 0 @@ (eur 5 `withPrecision` 0)) #endif -- | Parse an amount from a string, or get an error. amountp' :: String -> Amount amountp' s = case runParser (evalStateT (amountp <* eof) mempty) "" (T.pack s) of Right amt -> amt Left err -> error' $ show err -- XXX should throwError -- | Parse a mixed amount from a string, or get an error. mamountp' :: String -> MixedAmount mamountp' = Mixed . (:[]) . amountp' signp :: TextParser m String signp = do sign <- optional $ oneOf ("+-" :: [Char]) return $ case sign of Just '-' -> "-" _ -> "" leftsymbolamountp :: Monad m => JournalStateParser m Amount leftsymbolamountp = do sign <- lift signp c <- lift commoditysymbolp sp <- lift $ many spacenonewline (q,prec,mdec,mgrps) <- lift numberp let s = amountstyle{ascommodityside=L, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} p <- priceamountp let applysign = if sign=="-" then negate else id return $ applysign $ Amount c q p s "left-symbol amount" rightsymbolamountp :: Monad m => JournalStateParser m Amount rightsymbolamountp = do (q,prec,mdec,mgrps) <- lift numberp sp <- lift $ many spacenonewline c <- lift commoditysymbolp p <- priceamountp let s = amountstyle{ascommodityside=R, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} return $ Amount c q p s "right-symbol amount" nosymbolamountp :: Monad m => JournalStateParser m Amount nosymbolamountp = do (q,prec,mdec,mgrps) <- lift numberp p <- priceamountp -- apply the most recently seen default commodity and style to this commodityless amount defcs <- getDefaultCommodityAndStyle let (c,s) = case defcs of Just (defc,defs) -> (defc, defs{asprecision=max (asprecision defs) prec}) Nothing -> ("", amountstyle{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}) return $ Amount c q p s "no-symbol amount" commoditysymbolp :: TextParser m CommoditySymbol commoditysymbolp = (quotedcommoditysymbolp <|> simplecommoditysymbolp) "commodity symbol" quotedcommoditysymbolp :: TextParser m CommoditySymbol quotedcommoditysymbolp = do char '"' s <- some $ noneOf (";\n\"" :: [Char]) char '"' return $ T.pack s simplecommoditysymbolp :: TextParser m CommoditySymbol simplecommoditysymbolp = T.pack <$> some (noneOf nonsimplecommoditychars) priceamountp :: Monad m => JournalStateParser m Price priceamountp = try (do lift (many spacenonewline) char '@' try (do char '@' lift (many spacenonewline) a <- amountp -- XXX can parse more prices ad infinitum, shouldn't return $ TotalPrice a) <|> (do lift (many spacenonewline) a <- amountp -- XXX can parse more prices ad infinitum, shouldn't return $ UnitPrice a)) <|> return NoPrice partialbalanceassertionp :: Monad m => JournalStateParser m (Maybe Amount) partialbalanceassertionp = try (do lift (many spacenonewline) char '=' lift (many spacenonewline) a <- amountp -- XXX should restrict to a simple amount return $ Just $ a) <|> return Nothing -- balanceassertion :: 
Monad m => TextParser m (Maybe MixedAmount) -- balanceassertion = -- try (do -- lift (many spacenonewline) -- string "==" -- lift (many spacenonewline) -- a <- amountp -- XXX should restrict to a simple amount -- return $ Just $ Mixed [a]) -- <|> return Nothing -- http://ledger-cli.org/3.0/doc/ledger3.html#Fixing-Lot-Prices fixedlotpricep :: Monad m => JournalStateParser m (Maybe Amount) fixedlotpricep = try (do lift (many spacenonewline) char '{' lift (many spacenonewline) char '=' lift (many spacenonewline) a <- amountp -- XXX should restrict to a simple amount lift (many spacenonewline) char '}' return $ Just a) <|> return Nothing -- | Parse a string representation of a number for its value and display -- attributes. -- -- Some international number formats are accepted, eg either period or comma -- may be used for the decimal point, and the other of these may be used for -- separating digit groups in the integer part. See -- http://en.wikipedia.org/wiki/Decimal_separator for more examples. -- -- This returns: the parsed numeric value, the precision (number of digits -- seen following the decimal point), the decimal point character used if any, -- and the digit group style if any. -- numberp :: TextParser m (Quantity, Int, Maybe Char, Maybe DigitGroupStyle) numberp = do -- a number is an optional sign followed by a sequence of digits possibly -- interspersed with periods, commas, or both -- ptrace "numberp" sign <- signp parts <- some $ choice' [some digitChar, some $ char ',', some $ char '.'] dbg8 "numberp parsed" (sign,parts) `seq` return () -- check the number is well-formed and identify the decimal point and digit -- group separator characters used, if any let (numparts, puncparts) = partition numeric parts (ok, mdecimalpoint, mseparator) = case (numparts, puncparts) of ([],_) -> (False, Nothing, Nothing) -- no digits, not ok (_,[]) -> (True, Nothing, Nothing) -- digits with no punctuation, ok (_,[[d]]) -> (True, Just d, Nothing) -- just a single punctuation of length 1, assume it's a decimal point (_,[_]) -> (False, Nothing, Nothing) -- a single punctuation of some other length, not ok (_,_:_:_) -> -- two or more punctuations let (s:ss, d) = (init puncparts, last puncparts) -- the leftmost is a separator and the rightmost may be a decimal point in if any ((/=1).length) puncparts -- adjacent punctuation chars, not ok || any (s/=) ss -- separator chars vary, not ok || head parts == s -- number begins with a separator char, not ok then (False, Nothing, Nothing) else if s == d then (True, Nothing, Just $ head s) -- just one kind of punctuation - must be separators else (True, Just $ head d, Just $ head s) -- separator(s) and a decimal point unless ok $ fail $ "number seems ill-formed: "++concat parts -- get the digit group sizes and digit group style if any let (intparts',fracparts') = span ((/= mdecimalpoint) . Just . 
head) parts (intparts, fracpart) = (filter numeric intparts', filter numeric fracparts') groupsizes = reverse $ case map length intparts of (a:b:cs) | a < b -> b:cs gs -> gs mgrps = (`DigitGroups` groupsizes) <$> mseparator -- put the parts back together without digit group separators, get the precision and parse the value let int = concat $ "":intparts frac = concat $ "":fracpart precision = length frac int' = if null int then "0" else int frac' = if null frac then "0" else frac quantity = read $ sign++int'++"."++frac' -- this read should never fail return $ dbg8 "numberp quantity,precision,mdecimalpoint,mgrps" (quantity,precision,mdecimalpoint,mgrps) "numberp" where numeric = isNumber . headDef '_' -- test_numberp = do -- let s `is` n = assertParseEqual (parseWithState mempty numberp s) n -- assertFails = assertBool . isLeft . parseWithState mempty numberp -- assertFails "" -- "0" `is` (0, 0, '.', ',', []) -- "1" `is` (1, 0, '.', ',', []) -- "1.1" `is` (1.1, 1, '.', ',', []) -- "1,000.1" `is` (1000.1, 1, '.', ',', [3]) -- "1.00.000,1" `is` (100000.1, 1, ',', '.', [3,2]) -- "1,000,000" `is` (1000000, 0, '.', ',', [3,3]) -- "1." `is` (1, 0, '.', ',', []) -- "1," `is` (1, 0, ',', '.', []) -- ".1" `is` (0.1, 1, '.', ',', []) -- ",1" `is` (0.1, 1, ',', '.', []) -- assertFails "1,000.000,1" -- assertFails "1.000,000.1" -- assertFails "1,000.000.1" -- assertFails "1,,1" -- assertFails "1..1" -- assertFails ".1," -- assertFails ",1." --- ** comments multilinecommentp :: JournalStateParser m () multilinecommentp = do string "comment" >> lift (many spacenonewline) >> newline go where go = try (eof <|> (string "end comment" >> newline >> return ())) <|> (anyLine >> go) anyLine = anyChar `manyTill` newline emptyorcommentlinep :: JournalStateParser m () emptyorcommentlinep = do lift (many spacenonewline) >> (commentp <|> (lift (many spacenonewline) >> newline >> return "")) return () -- | Parse a possibly multi-line comment following a semicolon. followingcommentp :: JournalStateParser m Text followingcommentp = -- ptrace "followingcommentp" do samelinecomment <- lift (many spacenonewline) >> (try semicoloncommentp <|> (newline >> return "")) newlinecomments <- many (try (lift (some spacenonewline) >> semicoloncommentp)) return $ T.unlines $ samelinecomment:newlinecomments -- | Parse a possibly multi-line comment following a semicolon, and -- any tags and/or posting dates within it. Posting dates can be -- expressed with "date"/"date2" tags and/or bracketed dates. The -- dates are parsed in full here so that errors are reported in the -- right position. Missing years can be inferred if a default date is -- provided. -- -- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; a:b, date:3/4, [=5/6]" -- Right ("a:b, date:3/4, [=5/6]\n",[("a","b"),("date","3/4")],Just 2000-03-04,Just 2000-05-06) -- -- Year unspecified and no default provided -> unknown year error, at correct position: -- >>> rejp (followingcommentandtagsp Nothing) " ; xxx date:3/4\n ; second line" -- Left ...1:22...partial date 3/4 found, but the current year is unknown... -- -- Date tag value contains trailing text - forgot the comma, confused: -- the syntaxes ? 
We'll accept the leading date anyway -- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; date:3/4=5/6" -- Right ("date:3/4=5/6\n",[("date","3/4=5/6")],Just 2000-03-04,Nothing) -- followingcommentandtagsp :: MonadIO m => Maybe Day -> ErroringJournalParser m (Text, [Tag], Maybe Day, Maybe Day) followingcommentandtagsp mdefdate = do -- pdbg 0 "followingcommentandtagsp" -- Parse a single or multi-line comment, starting on this line or the next one. -- Save the starting position and preserve all whitespace for the subsequent re-parsing, -- to get good error positions. startpos <- getPosition commentandwhitespace :: String <- do let semicoloncommentp' = (:) <$> char ';' <*> anyChar `manyTill` eolof sp1 <- lift (many spacenonewline) l1 <- try (lift semicoloncommentp') <|> (newline >> return "") ls <- lift . many $ try ((++) <$> some spacenonewline <*> semicoloncommentp') return $ unlines $ (sp1 ++ l1) : ls let comment = T.pack $ unlines $ map (lstrip . dropWhile (==';') . strip) $ lines commentandwhitespace -- pdbg 0 $ "commentws:"++show commentandwhitespace -- pdbg 0 $ "comment:"++show comment -- Reparse the comment for any tags. tags <- case runTextParser (setPosition startpos >> tagsp) $ T.pack commentandwhitespace of Right ts -> return ts Left e -> throwError $ parseErrorPretty e -- pdbg 0 $ "tags: "++show tags -- Reparse the comment for any posting dates. Use the transaction date for defaults, if provided. epdates <- liftIO $ rejp (setPosition startpos >> postingdatesp mdefdate) $ T.pack commentandwhitespace pdates <- case epdates of Right ds -> return ds Left e -> throwError e -- pdbg 0 $ "pdates: "++show pdates let mdate = headMay $ map snd $ filter ((=="date").fst) pdates mdate2 = headMay $ map snd $ filter ((=="date2").fst) pdates return (comment, tags, mdate, mdate2) commentp :: JournalStateParser m Text commentp = commentStartingWithp commentchars commentchars :: [Char] commentchars = "#;*" semicoloncommentp :: JournalStateParser m Text semicoloncommentp = commentStartingWithp ";" commentStartingWithp :: [Char] -> JournalStateParser m Text commentStartingWithp cs = do -- ptrace "commentStartingWith" oneOf cs lift (many spacenonewline) l <- anyChar `manyTill` (lift eolof) optional newline return $ T.pack l --- ** tags -- | Extract any tags (name:value ended by comma or newline) embedded in a string. -- -- >>> commentTags "a b:, c:c d:d, e" -- [("b",""),("c","c d:d")] -- -- >>> commentTags "a [1/1/1] [1/1] [1], [=1/1/1] [=1/1] [=1] [1/1=1/1/1] [1=1/1/1] b:c" -- [("b","c")] -- -- --[("date","1/1/1"),("date","1/1"),("date2","1/1/1"),("date2","1/1"),("date","1/1"),("date2","1/1/1"),("date","1"),("date2","1/1/1")] -- -- >>> commentTags "\na b:, \nd:e, f" -- [("b",""),("d","e")] -- commentTags :: Text -> [Tag] commentTags s = case runTextParser tagsp s of Right r -> r Left _ -> [] -- shouldn't happen -- | Parse all tags found in a string. tagsp :: Parser [Tag] tagsp = -- do -- pdbg 0 $ "tagsp" many (try (nontagp >> tagp)) -- | Parse everything up till the first tag. -- -- >>> rtp nontagp "\na b:, \nd:e, f" -- Right "\na " nontagp :: Parser String nontagp = -- do -- pdbg 0 "nontagp" -- anyChar `manyTill` (lookAhead (try (tagorbracketeddatetagsp Nothing >> return ()) <|> eof)) anyChar `manyTill` lookAhead (try (void tagp) <|> eof) -- XXX costly ? -- | Tags begin with a colon-suffixed tag name (a word beginning with -- a letter) and are followed by a tag value (any text up to a comma -- or newline, whitespace-stripped). 
-- -- >>> rtp tagp "a:b b , c AuxDate: 4/2" -- Right ("a","b b") -- tagp :: Parser Tag tagp = do -- pdbg 0 "tagp" n <- tagnamep v <- tagvaluep return (n,v) -- | -- >>> rtp tagnamep "a:" -- Right "a" tagnamep :: Parser Text tagnamep = -- do -- pdbg 0 "tagnamep" T.pack <$> some (noneOf (": \t\n" :: [Char])) <* char ':' tagvaluep :: TextParser m Text tagvaluep = do -- ptrace "tagvalue" v <- anyChar `manyTill` (void (try (char ',')) <|> eolof) return $ T.pack $ strip $ reverse $ dropWhile (==',') $ reverse $ strip v --- ** posting dates -- | Parse all posting dates found in a string. Posting dates can be -- expressed with date/date2 tags and/or bracketed dates. The dates -- are parsed fully to give useful errors. Missing years can be -- inferred only if a default date is provided. -- postingdatesp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName,Day)] postingdatesp mdefdate = do -- pdbg 0 $ "postingdatesp" let p = ((:[]) <$> datetagp mdefdate) <|> bracketeddatetagsp mdefdate nonp = many (notFollowedBy p >> anyChar) -- anyChar `manyTill` (lookAhead (try (p >> return ()) <|> eof)) concat <$> many (try (nonp >> p)) --- ** date tags -- | Date tags are tags with name "date" or "date2". Their value is -- parsed as a date, using the provided default date if any for -- inferring a missing year if needed. Any error in date parsing is -- reported and terminates parsing. -- -- >>> rejp (datetagp Nothing) "date: 2000/1/2 " -- Right ("date",2000-01-02) -- -- >>> rejp (datetagp (Just $ fromGregorian 2001 2 3)) "date2:3/4" -- Right ("date2",2001-03-04) -- -- >>> rejp (datetagp Nothing) "date: 3/4" -- Left ...1:9...partial date 3/4 found, but the current year is unknown... -- datetagp :: Monad m => Maybe Day -> ErroringJournalParser m (TagName,Day) datetagp mdefdate = do -- pdbg 0 "datetagp" string "date" n <- T.pack . fromMaybe "" <$> optional (string "2") char ':' startpos <- getPosition v <- lift tagvaluep -- re-parse value as a date. j <- get let ep :: Either (ParseError Char Dec) Day ep = parseWithState' j{jparsedefaultyear=first3.toGregorian <$> mdefdate} -- The value extends to a comma, newline, or end of file. -- It seems like ignoring any extra stuff following a date -- gives better errors here. (do setPosition startpos datep) -- <* eof) v case ep of Left e -> throwError $ parseErrorPretty e Right d -> return ("date"<>n, d) --- ** bracketed dates -- tagorbracketeddatetagsp :: Monad m => Maybe Day -> TextParser u m [Tag] -- tagorbracketeddatetagsp mdefdate = -- bracketeddatetagsp mdefdate <|> ((:[]) <$> tagp) -- | Parse Ledger-style bracketed posting dates ([DATE=DATE2]), as -- "date" and/or "date2" tags. Anything that looks like an attempt at -- this (a square-bracketed sequence of 0123456789/-.= containing at -- least one digit and one date separator) is also parsed, and will -- throw an appropriate error. -- -- The dates are parsed in full here so that errors are reported in -- the right position. A missing year in DATE can be inferred if a -- default date is provided. A missing year in DATE2 will be inferred -- from DATE. -- -- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/2=3/4]" -- Right [("date",2016-01-02),("date2",2016-03-04)] -- -- >>> rejp (bracketeddatetagsp Nothing) "[1]" -- Left ...not a bracketed date... -- -- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/32]" -- Left ...1:11:...bad date: 2016/1/32... -- -- >>> rejp (bracketeddatetagsp Nothing) "[1/31]" -- Left ...1:6:...partial date 1/31 found, but the current year is unknown... 
-- -- >>> rejp (bracketeddatetagsp Nothing) "[0123456789/-.=/-.=]" -- Left ...1:15:...bad date, different separators... -- bracketeddatetagsp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName, Day)] bracketeddatetagsp mdefdate = do -- pdbg 0 "bracketeddatetagsp" char '[' startpos <- getPosition let digits = "0123456789" s <- some (oneOf $ '=':digits++datesepchars) char ']' unless (any (`elem` s) digits && any (`elem` datesepchars) s) $ fail "not a bracketed date" -- looks sufficiently like a bracketed date, now we -- re-parse as dates and throw any errors j <- get let ep :: Either (ParseError Char Dec) (Maybe Day, Maybe Day) ep = parseWithState' j{jparsedefaultyear=first3.toGregorian <$> mdefdate} (do setPosition startpos md1 <- optional datep maybe (return ()) (setYear.first3.toGregorian) md1 md2 <- optional $ char '=' >> datep eof return (md1,md2) ) (T.pack s) case ep of Left e -> throwError $ parseErrorPretty e Right (md1,md2) -> return $ catMaybes [("date",) <$> md1, ("date2",) <$> md2] hledger-lib-1.2/Hledger/Read/CsvReader.hs0000644000000000000000000007756313066173044016405 0ustar0000000000000000{-# LANGUAGE CPP #-} {-| A reader for CSV data, using an extra rules file to help interpret the data. -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TypeSynonymInstances #-} {-# LANGUAGE FlexibleInstances #-} module Hledger.Read.CsvReader ( -- * Reader reader, -- * Misc. CsvRecord, -- rules, rulesFileFor, parseRulesFile, parseAndValidateCsvRules, expandIncludes, transactionFromCsvRecord, -- * Tests tests_Hledger_Read_CsvReader ) where import Prelude () import Prelude.Compat hiding (getContents) import Control.Exception hiding (try) import Control.Monad import Control.Monad.Except import Control.Monad.State.Strict (StateT, get, modify', evalStateT) -- import Test.HUnit import Data.Char (toLower, isDigit, isSpace) import Data.List.Compat import Data.Maybe import Data.Ord import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar (Day) #if MIN_VERSION_time(1,5,0) import Data.Time.Format (parseTimeM, defaultTimeLocale) #else import Data.Time.Format (parseTime) import System.Locale (defaultTimeLocale) #endif import Safe import System.Directory (doesFileExist) import System.FilePath import System.IO (stderr) import Test.HUnit hiding (State) import Text.CSV (parseCSV, CSV) import Text.Megaparsec hiding (parse, State) import Text.Megaparsec.Text import qualified Text.Parsec as Parsec import Text.Printf (hPrintf,printf) import Hledger.Data import Hledger.Utils.UTF8IOCompat (getContents) import Hledger.Utils import Hledger.Read.Common (amountp, statusp, genericSourcePos) reader :: Reader reader = Reader {rFormat = "csv" ,rExtensions = ["csv"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from CSV data, or give an error. -- XXX currently ignores the string and reads from the file path parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse rulesfile _ f t = do r <- liftIO $ readJournalFromCsv rulesfile f t case r of Left e -> throwError e Right j -> return $ journalNumberAndTieTransactions j -- XXX does not use parseAndFinaliseJournal like the other readers -- | Read a Journal from the given CSV data (and filename, used for error -- messages), or return an error. Proceed as follows: -- @ -- 1. 
parse CSV conversion rules from the specified rules file, or from -- the default rules file for the specified CSV file, if it exists, -- or throw a parse error; if it doesn't exist, use built-in default rules -- 2. parse the CSV data, or throw a parse error -- 3. convert the CSV records to transactions using the rules -- 4. if the rules file didn't exist, create it with the default rules and filename -- 5. return the transactions as a Journal -- @ readJournalFromCsv :: Maybe FilePath -> FilePath -> Text -> IO (Either String Journal) readJournalFromCsv Nothing "-" _ = return $ Left "please use --rules-file when reading CSV from stdin" readJournalFromCsv mrulesfile csvfile csvdata = handle (\e -> return $ Left $ show (e :: IOException)) $ do let throwerr = throw.userError -- parse rules let rulesfile = fromMaybe (rulesFileFor csvfile) mrulesfile rulesfileexists <- doesFileExist rulesfile rulestext <- if rulesfileexists then do hPrintf stderr "using conversion rules file %s\n" rulesfile liftIO $ (readFile' rulesfile >>= expandIncludes (takeDirectory rulesfile)) else return $ defaultRulesText rulesfile rules <- liftIO (runExceptT $ parseAndValidateCsvRules rulesfile rulestext) >>= either throwerr return dbg2IO "rules" rules -- apply skip directive let skip = maybe 0 oneorerror $ getDirective "skip" rules where oneorerror "" = 1 oneorerror s = readDef (throwerr $ "could not parse skip value: " ++ show s) s -- parse csv -- parsec seems to fail if you pass it "-" here XXX try again with megaparsec let parsecfilename = if csvfile == "-" then "(stdin)" else csvfile records <- (either throwerr id . dbg2 "validateCsv" . validateCsv skip . dbg2 "parseCsv") `fmap` parseCsv parsecfilename (T.unpack csvdata) dbg1IO "first 3 csv records" $ take 3 records -- identify header lines -- let (headerlines, datalines) = identifyHeaderLines records -- mfieldnames = lastMay headerlines -- convert to transactions and return as a journal let txns = snd $ mapAccumL (\pos r -> (pos, transactionFromCsvRecord (let SourcePos name line col = pos in SourcePos name (unsafePos $ unPos line + 1) col) rules r)) (initialPos parsecfilename) records -- heuristic: if the records appear to have been in reverse date order, -- reverse them all as well as doing a txn date sort, -- so that same-day txns' original order is preserved txns' | length txns > 1 && tdate (head txns) > tdate (last txns) = reverse txns | otherwise = txns when (not rulesfileexists) $ do hPrintf stderr "created default conversion rules file %s, edit this for better results\n" rulesfile writeFile rulesfile $ T.unpack rulestext return $ Right nulljournal{jtxns=sortBy (comparing tdate) txns'} parseCsv :: FilePath -> String -> IO (Either Parsec.ParseError CSV) parseCsv path csvdata = case path of "-" -> liftM (parseCSV "(stdin)") getContents _ -> return $ parseCSV path csvdata -- | Return the cleaned up and validated CSV data, or an error. 
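-- For example (an illustrative call, not a doctest), with one header
-- line to skip:
--
-- > validateCsv 1 (Right [["Date","Amount"],["2017/1/1","5.00"]])
--
-- should drop the header record and return the remaining record, while
-- records with fewer than two fields, or a field count differing from
-- the first record's, would be rejected with an error.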
validateCsv :: Int -> Either Parsec.ParseError CSV -> Either String [CsvRecord] validateCsv _ (Left e) = Left $ show e validateCsv numhdrlines (Right rs) = validate $ drop numhdrlines $ filternulls rs where filternulls = filter (/=[""]) validate [] = Left "no CSV records found" validate rs@(first:_) | isJust lessthan2 = let r = fromJust lessthan2 in Left $ printf "CSV record %s has less than two fields" (show r) | isJust different = let r = fromJust different in Left $ printf "the first CSV record %s has %d fields but %s has %d" (show first) length1 (show r) (length r) | otherwise = Right rs where length1 = length first lessthan2 = headMay $ filter ((<2).length) rs different = headMay $ filter ((/=length1).length) rs -- -- | The highest (0-based) field index referenced in the field -- -- definitions, or -1 if no fields are defined. -- maxFieldIndex :: CsvRules -> Int -- maxFieldIndex r = maximumDef (-1) $ catMaybes [ -- dateField r -- ,statusField r -- ,codeField r -- ,amountField r -- ,amountInField r -- ,amountOutField r -- ,currencyField r -- ,accountField r -- ,account2Field r -- ,date2Field r -- ] -- rulesFileFor :: CliOpts -> FilePath -> FilePath -- rulesFileFor CliOpts{rules_file_=Just f} _ = f -- rulesFileFor CliOpts{rules_file_=Nothing} csvfile = replaceExtension csvfile ".rules" rulesFileFor :: FilePath -> FilePath rulesFileFor = (++ ".rules") csvFileFor :: FilePath -> FilePath csvFileFor = reverse . drop 6 . reverse defaultRulesText :: FilePath -> Text defaultRulesText csvfile = T.pack $ unlines ["# hledger csv conversion rules for " ++ csvFileFor (takeFileName csvfile) ,"# cf http://hledger.org/manual#csv-files" ,"" ,"account1 assets:bank:checking" ,"" ,"fields date, description, amount" ,"" ,"#skip 1" ,"" ,"#date-format %-d/%-m/%Y" ,"#date-format %-m/%-d/%Y" ,"#date-format %Y-%h-%d" ,"" ,"#currency $" ,"" ,"if ITUNES" ," account2 expenses:entertainment" ,"" ,"if (TO|FROM) SAVINGS" ," account2 assets:bank:savings\n" ] -------------------------------------------------------------------------------- -- Conversion rules parsing {- Grammar for the CSV conversion rules, more or less: RULES: RULE* RULE: ( FIELD-LIST | FIELD-ASSIGNMENT | CONDITIONAL-BLOCK | SKIP | DATE-FORMAT | COMMENT | BLANK ) NEWLINE FIELD-LIST: fields SPACE FIELD-NAME ( SPACE? , SPACE? FIELD-NAME )* FIELD-NAME: QUOTED-FIELD-NAME | BARE-FIELD-NAME QUOTED-FIELD-NAME: " (any CHAR except double-quote)+ " BARE-FIELD-NAME: any CHAR except space, tab, #, ; FIELD-ASSIGNMENT: JOURNAL-FIELD ASSIGNMENT-SEPARATOR FIELD-VALUE JOURNAL-FIELD: date | date2 | status | code | description | comment | account1 | account2 | amount | JOURNAL-PSEUDO-FIELD JOURNAL-PSEUDO-FIELD: amount-in | amount-out | currency ASSIGNMENT-SEPARATOR: SPACE | ( : SPACE? ) FIELD-VALUE: VALUE (possibly containing CSV-FIELD-REFERENCEs) CSV-FIELD-REFERENCE: % CSV-FIELD CSV-FIELD: ( FIELD-NAME | FIELD-NUMBER ) (corresponding to a CSV field) FIELD-NUMBER: DIGIT+ CONDITIONAL-BLOCK: if ( FIELD-MATCHER NEWLINE )+ INDENTED-BLOCK FIELD-MATCHER: ( CSV-FIELD-NAME SPACE? )? ( MATCHOP SPACE? )? PATTERNS MATCHOP: ~ PATTERNS: ( NEWLINE REGEXP )* REGEXP INDENTED-BLOCK: ( SPACE ( FIELD-ASSIGNMENT | COMMENT ) NEWLINE )+ REGEXP: ( NONSPACE CHAR* ) SPACE? VALUE: SPACE? ( CHAR* ) SPACE? COMMENT: SPACE? COMMENT-CHAR VALUE COMMENT-CHAR: # | ; NONSPACE: any CHAR not a SPACE-CHAR BLANK: SPACE? 
SPACE: SPACE-CHAR+ SPACE-CHAR: space | tab CHAR: any character except newline DIGIT: 0-9 -} {- | A set of data definitions and account-matching patterns sufficient to convert a particular CSV data file into meaningful journal transactions. -} data CsvRules = CsvRules { rdirectives :: [(DirectiveName,String)], rcsvfieldindexes :: [(CsvFieldName, CsvFieldIndex)], rassignments :: [(JournalFieldName, FieldTemplate)], rconditionalblocks :: [ConditionalBlock] } deriving (Show, Eq) type CsvRulesParser a = StateT CsvRules Parser a type DirectiveName = String type CsvFieldName = String type CsvFieldIndex = Int type JournalFieldName = String type FieldTemplate = String type ConditionalBlock = ([RecordMatcher], [(JournalFieldName, FieldTemplate)]) -- block matches if all RecordMatchers match type RecordMatcher = [RegexpPattern] -- match if any regexps match any of the csv fields -- type FieldMatcher = (CsvFieldName, [RegexpPattern]) -- match if any regexps match this csv field type DateFormat = String type RegexpPattern = String rules = CsvRules { rdirectives=[], rcsvfieldindexes=[], rassignments=[], rconditionalblocks=[] } addDirective :: (DirectiveName, String) -> CsvRules -> CsvRules addDirective d r = r{rdirectives=d:rdirectives r} addAssignment :: (JournalFieldName, FieldTemplate) -> CsvRules -> CsvRules addAssignment a r = r{rassignments=a:rassignments r} setIndexesAndAssignmentsFromList :: [CsvFieldName] -> CsvRules -> CsvRules setIndexesAndAssignmentsFromList fs r = addAssignmentsFromList fs . setCsvFieldIndexesFromList fs $ r setCsvFieldIndexesFromList :: [CsvFieldName] -> CsvRules -> CsvRules setCsvFieldIndexesFromList fs r = r{rcsvfieldindexes=zip fs [1..]} addAssignmentsFromList :: [CsvFieldName] -> CsvRules -> CsvRules addAssignmentsFromList fs r = foldl' maybeAddAssignment r journalfieldnames where maybeAddAssignment rules f = (maybe id addAssignmentFromIndex $ elemIndex f fs) rules where addAssignmentFromIndex i = addAssignment (f, "%"++show (i+1)) addConditionalBlock :: ConditionalBlock -> CsvRules -> CsvRules addConditionalBlock b r = r{rconditionalblocks=b:rconditionalblocks r} getDirective :: DirectiveName -> CsvRules -> Maybe FieldTemplate getDirective directivename = lookup directivename . rdirectives instance ShowErrorComponent String where showErrorComponent = id -- | An error-throwing action that parses this file's content -- as CSV conversion rules, interpolating any included files first, -- and runs some extra validation checks. parseRulesFile :: FilePath -> ExceptT String IO CsvRules parseRulesFile f = liftIO (readFile' f >>= expandIncludes (takeDirectory f)) >>= parseAndValidateCsvRules f -- | Look for hledger rules file-style include directives in this text, -- and interpolate the included files, recursively. -- Included file paths may be relative to the directory of the -- provided file path. -- This is a cheap hack to avoid rewriting the CSV rules parser. expandIncludes :: FilePath -> T.Text -> IO T.Text expandIncludes basedir content = do let (ls,rest) = break (T.isPrefixOf "include") $ T.lines content case rest of [] -> return $ T.unlines ls ((T.stripPrefix "include" -> Just f):ls') -> do let f' = basedir dropWhile isSpace (T.unpack f) basedir' = takeDirectory f' included <- readFile' f' >>= expandIncludes basedir' return $ T.unlines [T.unlines ls, included, T.unlines ls'] ls' -> return $ T.unlines $ ls ++ ls' -- should never get here -- | An error-throwing action that parses this text as CSV conversion rules -- and runs some extra validation checks. 
The file path is for error messages. parseAndValidateCsvRules :: FilePath -> T.Text -> ExceptT String IO CsvRules parseAndValidateCsvRules rulesfile s = do let rules = parseCsvRules rulesfile s case rules of Left e -> ExceptT $ return $ Left $ parseErrorPretty e Right r -> do r_ <- liftIO $ runExceptT $ validateRules r ExceptT $ case r_ of Left e -> return $ Left $ parseErrorPretty $ toParseError e Right r -> return $ Right r where toParseError :: forall s. Ord s => s -> ParseError Char s toParseError s = (mempty :: ParseError Char s) { errorCustom = S.singleton s} -- | Parse this text as CSV conversion rules. The file path is for error messages. parseCsvRules :: FilePath -> T.Text -> Either (ParseError Char Dec) CsvRules -- parseCsvRules rulesfile s = runParser csvrulesfile nullrules{baseAccount=takeBaseName rulesfile} rulesfile s parseCsvRules rulesfile s = runParser (evalStateT rulesp rules) rulesfile s -- | Return the validated rules, or an error. validateRules :: CsvRules -> ExceptT String IO CsvRules validateRules rules = do unless (isAssigned "date") $ ExceptT $ return $ Left "Please specify (at top level) the date field. Eg: date %1\n" unless ((amount && not (amountin || amountout)) || (not amount && (amountin && amountout))) $ ExceptT $ return $ Left "Please specify (at top level) either the amount field, or both the amount-in and amount-out fields. Eg: amount %2\n" ExceptT $ return $ Right rules where amount = isAssigned "amount" amountin = isAssigned "amount-in" amountout = isAssigned "amount-out" isAssigned f = isJust $ getEffectiveAssignment rules [] f -- parsers rulesp :: CsvRulesParser CsvRules rulesp = do many $ choiceInState [blankorcommentlinep "blank or comment line" ,(directivep >>= modify' . addDirective) "directive" ,(fieldnamelistp >>= modify' . setIndexesAndAssignmentsFromList) "field name list" ,(fieldassignmentp >>= modify' . addAssignment) "field assignment" ,(conditionalblockp >>= modify' . 
addConditionalBlock) "conditional block" ] eof r <- get return r{rdirectives=reverse $ rdirectives r ,rassignments=reverse $ rassignments r ,rconditionalblocks=reverse $ rconditionalblocks r } blankorcommentlinep :: CsvRulesParser () blankorcommentlinep = lift (pdbg 3 "trying blankorcommentlinep") >> choiceInState [blanklinep, commentlinep] blanklinep :: CsvRulesParser () blanklinep = lift (many spacenonewline) >> newline >> return () "blank line" commentlinep :: CsvRulesParser () commentlinep = lift (many spacenonewline) >> commentcharp >> lift restofline >> return () "comment line" commentcharp :: CsvRulesParser Char commentcharp = oneOf (";#*" :: [Char]) directivep :: CsvRulesParser (DirectiveName, String) directivep = (do lift $ pdbg 3 "trying directive" d <- choiceInState $ map string directives v <- (((char ':' >> lift (many spacenonewline)) <|> lift (some spacenonewline)) >> directivevalp) <|> (optional (char ':') >> lift (many spacenonewline) >> lift eolof >> return "") return (d,v) ) "directive" directives = ["date-format" -- ,"default-account1" -- ,"default-currency" -- ,"skip-lines" -- old ,"skip" -- ,"base-account" -- ,"base-currency" ] directivevalp :: CsvRulesParser String directivevalp = anyChar `manyTill` lift eolof fieldnamelistp :: CsvRulesParser [CsvFieldName] fieldnamelistp = (do lift $ pdbg 3 "trying fieldnamelist" string "fields" optional $ char ':' lift (some spacenonewline) let separator = lift (many spacenonewline) >> char ',' >> lift (many spacenonewline) f <- fromMaybe "" <$> optional fieldnamep fs <- some $ (separator >> fromMaybe "" <$> optional fieldnamep) lift restofline return $ map (map toLower) $ f:fs ) "field name list" fieldnamep :: CsvRulesParser String fieldnamep = quotedfieldnamep <|> barefieldnamep quotedfieldnamep :: CsvRulesParser String quotedfieldnamep = do char '"' f <- some $ noneOf ("\"\n:;#~" :: [Char]) char '"' return f barefieldnamep :: CsvRulesParser String barefieldnamep = some $ noneOf (" \t\n,;#~" :: [Char]) fieldassignmentp :: CsvRulesParser (JournalFieldName, FieldTemplate) fieldassignmentp = do lift $ pdbg 3 "trying fieldassignmentp" f <- journalfieldnamep assignmentseparatorp v <- fieldvalp return (f,v) "field assignment" journalfieldnamep :: CsvRulesParser String journalfieldnamep = lift (pdbg 2 "trying journalfieldnamep") >> choiceInState (map string journalfieldnames) journalfieldnames = [-- pseudo fields: "amount-in" ,"amount-out" ,"currency" -- standard fields: ,"date2" ,"date" ,"status" ,"code" ,"description" ,"amount" ,"account1" ,"account2" ,"comment" ] assignmentseparatorp :: CsvRulesParser () assignmentseparatorp = do lift $ pdbg 3 "trying assignmentseparatorp" choice [ -- try (lift (many spacenonewline) >> oneOf ":="), try (lift (many spacenonewline) >> char ':'), spaceChar ] _ <- lift (many spacenonewline) return () fieldvalp :: CsvRulesParser String fieldvalp = do lift $ pdbg 2 "trying fieldvalp" anyChar `manyTill` lift eolof conditionalblockp :: CsvRulesParser ConditionalBlock conditionalblockp = do lift $ pdbg 3 "trying conditionalblockp" string "if" >> lift (many spacenonewline) >> optional newline ms <- some recordmatcherp as <- many (lift (some spacenonewline) >> fieldassignmentp) when (null as) $ fail "start of conditional block found, but no assignment rules afterward\n(assignment rules in a conditional block should be indented)\n" return (ms, as) "conditional block" recordmatcherp :: CsvRulesParser [String] recordmatcherp = do lift $ pdbg 2 "trying recordmatcherp" -- pos <- currentPos _ <- optional 
(matchoperatorp >> lift (many spacenonewline) >> optional newline) ps <- patternsp when (null ps) $ fail "start of record matcher found, but no patterns afterward\n(patterns should not be indented)\n" return ps "record matcher" matchoperatorp :: CsvRulesParser String matchoperatorp = choiceInState $ map string ["~" -- ,"!~" -- ,"=" -- ,"!=" ] patternsp :: CsvRulesParser [String] patternsp = do lift $ pdbg 3 "trying patternsp" ps <- many regexp return ps regexp :: CsvRulesParser String regexp = do lift $ pdbg 3 "trying regexp" notFollowedBy matchoperatorp c <- lift nonspace cs <- anyChar `manyTill` lift eolof return $ strip $ c:cs -- fieldmatcher = do -- pdbg 2 "trying fieldmatcher" -- f <- fromMaybe "all" `fmap` (optional $ do -- f' <- fieldname -- lift (many spacenonewline) -- return f') -- char '~' -- lift (many spacenonewline) -- ps <- patterns -- let r = "(" ++ intercalate "|" ps ++ ")" -- return (f,r) -- "field matcher" -------------------------------------------------------------------------------- -- Converting CSV records to journal transactions type CsvRecord = [String] -- Convert a CSV record to a transaction using the rules, or raise an -- error if the data can not be parsed. transactionFromCsvRecord :: SourcePos -> CsvRules -> CsvRecord -> Transaction transactionFromCsvRecord sourcepos rules record = t where mdirective = (`getDirective` rules) mfieldtemplate = getEffectiveAssignment rules record render = renderTemplate rules record mskip = mdirective "skip" mdefaultcurrency = mdirective "default-currency" mparsedate = parseDateWithFormatOrDefaultFormats (mdirective "date-format") -- render each field using its template and the csv record, and -- in some cases parse the rendered string (eg dates and amounts) mdateformat = mdirective "date-format" date = render $ fromMaybe "" $ mfieldtemplate "date" date' = fromMaybe (error' $ dateerror "date" date mdateformat) $ mparsedate date mdate2 = maybe Nothing (Just . render) $ mfieldtemplate "date2" mdate2' = maybe Nothing (maybe (error' $ dateerror "date2" (fromMaybe "" mdate2) mdateformat) Just . mparsedate) mdate2 dateerror datefield value mdateformat = unlines ["error: could not parse \""++value++"\" as a date using date format "++maybe "\"YYYY/M/D\", \"YYYY-M-D\" or \"YYYY.M.D\"" show mdateformat ,"the CSV record is: "++intercalate ", " (map show record) ,"the "++datefield++" rule is: "++(fromMaybe "required, but missing" $ mfieldtemplate datefield) ,"the date-format is: "++fromMaybe "unspecified" mdateformat ,"you may need to " ++"change your "++datefield++" rule, " ++maybe "add a" (const "change your") mdateformat++" date-format rule, " ++"or "++maybe "add a" (const "change your") mskip++" skip rule" ,"for m/d/y or d/m/y dates, use date-format %-m/%-d/%Y or date-format %-d/%-m/%Y" ] status = case mfieldtemplate "status" of Nothing -> Uncleared Just str -> either statuserror id . runParser (statusp <* eof) "" . T.pack $ render str where statuserror err = error' $ unlines ["error: could not parse \""++str++"\" as a cleared status (should be *, ! or empty)" ,"the parse error is: "++show err ] code = maybe "" render $ mfieldtemplate "code" description = maybe "" render $ mfieldtemplate "description" comment = maybe "" render $ mfieldtemplate "comment" precomment = maybe "" render $ mfieldtemplate "precomment" currency = maybe (fromMaybe "" mdefaultcurrency) render $ mfieldtemplate "currency" amountstr = (currency++) $ negateIfParenthesised $ getAmountStr rules record amount = either amounterror (Mixed . 
(:[])) $ runParser (evalStateT (amountp <* eof) mempty) "" $ T.pack amountstr amounterror err = error' $ unlines ["error: could not parse \""++amountstr++"\" as an amount" ,showRecord record ,"the amount rule is: "++(fromMaybe "" $ mfieldtemplate "amount") ,"the currency rule is: "++(fromMaybe "unspecified" $ mfieldtemplate "currency") ,"the default-currency is: "++fromMaybe "unspecified" mdefaultcurrency ,"the parse error is: "++show err ,"you may need to " ++"change your amount or currency rules, " ++"or "++maybe "add a" (const "change your") mskip++" skip rule" ] -- Using costOfMixedAmount here to allow complex costs like "10 GBP @@ 15 USD". -- Aim is to have "10 GBP @@ 15 USD" applied to account2, but have "-15USD" applied to account1 amount1 = costOfMixedAmount amount amount2 = (-amount) s `or` def = if null s then def else s defaccount1 = fromMaybe "unknown" $ mdirective "default-account1" defaccount2 = case isNegativeMixedAmount amount2 of Just True -> "income:unknown" _ -> "expenses:unknown" account1 = T.pack $ maybe "" render (mfieldtemplate "account1") `or` defaccount1 account2 = T.pack $ maybe "" render (mfieldtemplate "account2") `or` defaccount2 -- build the transaction t = nulltransaction{ tsourcepos = genericSourcePos sourcepos, tdate = date', tdate2 = mdate2', tstatus = status, tcode = T.pack code, tdescription = T.pack description, tcomment = T.pack comment, tpreceding_comment_lines = T.pack precomment, tpostings = [posting {paccount=account2, pamount=amount2, ptransaction=Just t} ,posting {paccount=account1, pamount=amount1, ptransaction=Just t} ] } getAmountStr :: CsvRules -> CsvRecord -> String getAmountStr rules record = let mamount = getEffectiveAssignment rules record "amount" mamountin = getEffectiveAssignment rules record "amount-in" mamountout = getEffectiveAssignment rules record "amount-out" render = fmap (strip . renderTemplate rules record) in case (render mamount, render mamountin, render mamountout) of (Just "", Nothing, Nothing) -> error' $ "amount has no value\n"++showRecord record (Just a, Nothing, Nothing) -> a (Nothing, Just "", Just "") -> error' $ "neither amount-in or amount-out has a value\n"++showRecord record (Nothing, Just i, Just "") -> i (Nothing, Just "", Just o) -> negateStr o (Nothing, Just _, Just _) -> error' $ "both amount-in and amount-out have a value\n"++showRecord record _ -> error' $ "found values for amount and for amount-in/amount-out - please use either amount or amount-in/amount-out\n"++showRecord record negateIfParenthesised :: String -> String negateIfParenthesised ('(':s) | lastMay s == Just ')' = negateStr $ init s negateIfParenthesised s = s negateStr :: String -> String negateStr ('-':s) = s negateStr s = '-':s -- | Show a (approximate) recreation of the original CSV record. showRecord :: CsvRecord -> String showRecord r = "the CSV record is: "++intercalate ", " (map show r) -- | Given the conversion rules, a CSV record and a journal entry field name, find -- the template value ultimately assigned to this field, either at top -- level or in a matching conditional block. Conditional blocks' -- patterns are matched against an approximation of the original CSV -- record: all the field values with commas intercalated. 
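-- For example (an illustrative sketch, not from the original docs): with rules like
--
-- @
-- account1 assets:bank:checking
-- if WHOLEFOODS
--  account2 expenses:food
-- @
--
-- and the CSV record ["2012/1/1","WHOLEFOODS MARKET","-10.00"], the record is
-- approximated as "2012/1/1,WHOLEFOODS MARKET,-10.00"; the WHOLEFOODS pattern
-- matches it (patterns are case-insensitive regular expressions), so this
-- function returns Just "expenses:food" for the account2 field and
-- Just "assets:bank:checking" for account1. When several assignments apply to
-- the same field, the last one wins (conditional blocks are consulted after
-- top-level assignments).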
getEffectiveAssignment :: CsvRules -> CsvRecord -> JournalFieldName -> Maybe FieldTemplate getEffectiveAssignment rules record f = lastMay $ assignmentsFor f where assignmentsFor f = map snd $ filter ((==f).fst) $ toplevelassignments ++ conditionalassignments where toplevelassignments = rassignments rules conditionalassignments = concatMap snd $ filter blockMatches $ blocksAssigning f where blocksAssigning f = filter (any ((==f).fst) . snd) $ rconditionalblocks rules blockMatches :: ConditionalBlock -> Bool blockMatches (matchers,_) = all matcherMatches matchers where matcherMatches :: RecordMatcher -> Bool -- matcherMatches pats = any patternMatches pats matcherMatches pats = patternMatches $ "(" ++ intercalate "|" pats ++ ")" where patternMatches :: RegexpPattern -> Bool patternMatches pat = regexMatchesCI pat csvline where csvline = intercalate "," record renderTemplate :: CsvRules -> CsvRecord -> FieldTemplate -> String renderTemplate rules record t = regexReplaceBy "%[A-z0-9]+" replace t where replace ('%':pat) = maybe pat (\i -> atDef "" record (i-1)) mi where mi | all isDigit pat = readMay pat | otherwise = lookup pat $ rcsvfieldindexes rules replace pat = pat -- Parse the date string using the specified date-format, or if unspecified try these default formats: -- YYYY/MM/DD, YYYY-MM-DD, YYYY.MM.DD, MM/DD/YYYY (month and day can be 1 or 2 digits, year must be 4). parseDateWithFormatOrDefaultFormats :: Maybe DateFormat -> String -> Maybe Day parseDateWithFormatOrDefaultFormats mformat s = firstJust $ map parsewith formats where parsetime = #if MIN_VERSION_time(1,5,0) parseTimeM True #else parseTime #endif parsewith = flip (parsetime defaultTimeLocale) s formats = maybe ["%Y/%-m/%-d" ,"%Y-%-m-%-d" ,"%Y.%-m.%-d" -- ,"%-m/%-d/%Y" -- ,parseTime defaultTimeLocale "%Y/%m/%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTime defaultTimeLocale "%Y-%m-%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTime defaultTimeLocale "%m/%e/%Y" ('0':s) -- ,parseTime defaultTimeLocale "%m-%e-%Y" ('0':s) ] (:[]) mformat -------------------------------------------------------------------------------- -- tests tests_Hledger_Read_CsvReader = TestList (test_parser) -- ++ test_description_parsing) -- test_description_parsing = [ -- "description-field 1" ~: assertParseDescription "description-field 1\n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field 1 " ~: assertParseDescription "description-field 1 \n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field %(1)" ~: assertParseDescription "description-field %(1)\n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field %(1)/$(2)" ~: assertParseDescription "description-field %(1)/%(2)\n" [ -- FormatField False Nothing Nothing (FieldNo 1) -- , FormatLiteral "/" -- , FormatField False Nothing Nothing (FieldNo 2) -- ] -- ] -- where -- assertParseDescription string expected = do assertParseEqual (parseDescription string) (rules {descriptionField = expected}) -- parseDescription :: String -> Either ParseError CsvRules -- parseDescription x = runParser descriptionfieldWrapper rules "(unknown)" x -- descriptionfieldWrapper :: GenParser Char CsvRules CsvRules -- descriptionfieldWrapper = do -- descriptionfield -- r <- getState -- return r test_parser = [ "convert rules parsing: empty file" ~: do -- let assertMixedAmountParse parseresult mixedamount = -- (either (const "parse error") showMixedAmountDebug parseresult) ~?= (showMixedAmountDebug mixedamount) assertParseEqual (parseCsvRules "unknown" "") rules -- ,"convert rules 
parsing: accountrule" ~: do -- assertParseEqual (parseWithState rules accountrule "A\na\n") -- leading blank line required -- ([("A",Nothing)], "a") ,"convert rules parsing: trailing comments" ~: do assertParse (parseWithState' rules rulesp "skip\n# \n#\n") ,"convert rules parsing: trailing blank lines" ~: do assertParse (parseWithState' rules rulesp "skip\n\n \n") ,"convert rules parsing: empty field value" ~: do assertParse (parseWithState' rules rulesp "account1 \nif foo\n account2 foo\n") -- not supported -- ,"convert rules parsing: no final newline" ~: do -- assertParse (parseWithState rules csvrulesfile "A\na") -- assertParse (parseWithState rules csvrulesfile "A\na\n# \n#") -- assertParse (parseWithState rules csvrulesfile "A\na\n\n ") -- (rules{ -- -- dateField=Maybe FieldPosition, -- -- statusField=Maybe FieldPosition, -- -- codeField=Maybe FieldPosition, -- -- descriptionField=Maybe FieldPosition, -- -- amountField=Maybe FieldPosition, -- -- currencyField=Maybe FieldPosition, -- -- baseCurrency=Maybe String, -- -- baseAccount=AccountName, -- accountRules=[ -- ([("A",Nothing)], "a") -- ] -- }) ] hledger-lib-1.2/Hledger/Read/JournalReader.hs0000644000000000000000000006113613066173044017251 0ustar0000000000000000--- * doc -- Lines beginning "--- *" are collapsible orgstruct nodes. Emacs users, -- (add-hook 'haskell-mode-hook -- (lambda () (set-variable 'orgstruct-heading-prefix-regexp "--- " t)) -- 'orgstruct-mode) -- and press TAB on nodes to expand/collapse. {-| A reader for hledger's journal file format (). hledger's journal format is a compatible subset of c++ ledger's (), so this reader should handle many ledger files as well. Example: @ 2012\/3\/24 gift expenses:gifts $10 assets:cash @ Journal format supports the include directive which can read files in other formats, so the other file format readers need to be importable here. Some low-level journal syntax parsers which those readers also use are therefore defined separately in Hledger.Read.Common, avoiding import cycles. 
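Besides transactions, journal files can contain directives. An illustrative
sample (added here for orientation, not an exhaustive list):

@
; set the default year for subsequent partial dates
Y 2012

; declare a commodity and its display format
commodity $1000.00

; include another journal (or timeclock/timedot) file
include other.journal
@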
-} --- * module {-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-} module Hledger.Read.JournalReader ( --- * exports -- * Reader reader, -- * Parsing utils genericSourcePos, parseAndFinaliseJournal, runJournalParser, rjp, runErroringJournalParser, rejp, -- * Parsers used elsewhere getParentAccount, journalp, directivep, defaultyeardirectivep, marketpricedirectivep, datetimep, datep, -- codep, -- accountnamep, modifiedaccountnamep, postingp, -- amountp, -- amountp', -- mamountp', -- numberp, statusp, emptyorcommentlinep, followingcommentp, accountaliasp -- * Tests ,tests_Hledger_Read_JournalReader ) where --- * imports import Prelude () import Prelude.Compat hiding (readFile) import qualified Control.Exception as C import Control.Monad import Control.Monad.Except (ExceptT(..), runExceptT, throwError) import Control.Monad.State.Strict import qualified Data.Map.Strict as M import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import Safe import Test.HUnit #ifdef TESTS import Test.Framework import Text.Megaparsec.Error #endif import Text.Megaparsec hiding (parse) import Text.Printf import System.FilePath import Hledger.Data import Hledger.Read.Common import Hledger.Read.TimeclockReader (timeclockfilep) import Hledger.Read.TimedotReader (timedotfilep) import Hledger.Utils -- $setup -- >>> :set -XOverloadedStrings --- * reader reader :: Reader reader = Reader {rFormat = "journal" ,rExtensions = ["journal", "j", "hledger", "ledger"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from hledger's journal file -- format, or give an error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal journalp --- * parsers --- ** journal -- | A journal parser. Accumulates and returns a "ParsedJournal", -- which should be finalised/validated before use. -- -- >>> rejp (journalp <* eof) "2015/1/1\n a 0\n" -- Right Journal with 1 transactions, 1 accounts -- journalp :: MonadIO m => ErroringJournalParser m ParsedJournal journalp = do many addJournalItemP eof get -- | A side-effecting parser; parses any kind of journal item -- and updates the parse state accordingly. addJournalItemP :: MonadIO m => ErroringJournalParser m () addJournalItemP = -- all journal line types can be distinguished by the first -- character, can use choice without backtracking choice [ directivep , transactionp >>= modify' . addTransaction , modifiertransactionp >>= modify' . addModifierTransaction , periodictransactionp >>= modify' . addPeriodicTransaction , marketpricedirectivep >>= modify' . addMarketPrice , void emptyorcommentlinep , void multilinecommentp ] "transaction or directive" --- ** directives -- | Parse any journal directive and update the parse state accordingly. -- Cf http://hledger.org/manual.html#directives, -- http://ledger-cli.org/3.0/doc/ledger3.html#Command-Directives directivep :: MonadIO m => ErroringJournalParser m () directivep = (do optional $ char '!' 
choiceInState [ includedirectivep ,aliasdirectivep ,endaliasesdirectivep ,accountdirectivep ,applyaccountdirectivep ,commoditydirectivep ,endapplyaccountdirectivep ,tagdirectivep ,endtagdirectivep ,defaultyeardirectivep ,defaultcommoditydirectivep ,commodityconversiondirectivep ,ignoredpricecommoditydirectivep ] ) "directive" includedirectivep :: MonadIO m => ErroringJournalParser m () includedirectivep = do string "include" lift (some spacenonewline) filename <- lift restofline parentpos <- getPosition parentj <- get let childj = newJournalWithParseStateFrom parentj (ej :: Either String ParsedJournal) <- liftIO $ runExceptT $ do let curdir = takeDirectory (sourceName parentpos) filepath <- expandPath curdir filename `orRethrowIOError` (show parentpos ++ " locating " ++ filename) txt <- readFileAnyLineEnding filepath `orRethrowIOError` (show parentpos ++ " reading " ++ filepath) (ej1::Either (ParseError Char Dec) ParsedJournal) <- runParserT (evalStateT (choiceInState [journalp ,timeclockfilep ,timedotfilep -- can't include a csv file yet, that reader is special ]) childj) filepath txt either (throwError . ((show parentpos ++ " in included file " ++ show filename ++ ":\n") ++) . show) (return . journalAddFile (filepath, txt)) ej1 case ej of Left e -> throwError e Right childj -> modify' (\parentj -> childj <> parentj) -- discard child's parse info, prepend its (reversed) list data, combine other fields newJournalWithParseStateFrom :: Journal -> Journal newJournalWithParseStateFrom j = mempty{ jparsedefaultyear = jparsedefaultyear j ,jparsedefaultcommodity = jparsedefaultcommodity j ,jparseparentaccounts = jparseparentaccounts j ,jparsealiases = jparsealiases j -- ,jparsetransactioncount = jparsetransactioncount j ,jparsetimeclockentries = jparsetimeclockentries j } -- | Lift an IO action into the exception monad, rethrowing any IO -- error with the given message prepended. orRethrowIOError :: IO a -> String -> ExceptT String IO a orRethrowIOError io msg = ExceptT $ (Right <$> io) `C.catch` \(e::C.IOException) -> return $ Left $ printf "%s:\n%s" msg (show e) accountdirectivep :: JournalStateParser m () accountdirectivep = do string "account" lift (some spacenonewline) acct <- lift accountnamep newline many indentedlinep modify' (\j -> j{jaccounts = acct : jaccounts j}) indentedlinep :: JournalStateParser m String indentedlinep = lift (some spacenonewline) >> (rstrip <$> lift restofline) -- | Parse a one-line or multi-line commodity directive. -- -- >>> Right _ <- rejp commoditydirectivep "commodity $1.00" -- >>> Right _ <- rejp commoditydirectivep "commodity $\n format $1.00" -- >>> Right _ <- rejp commoditydirectivep "commodity $\n\n" -- a commodity with no format -- >>> Right _ <- rejp commoditydirectivep "commodity $1.00\n format $1.00" -- both, what happens ? commoditydirectivep :: Monad m => ErroringJournalParser m () commoditydirectivep = try commoditydirectiveonelinep <|> commoditydirectivemultilinep -- | Parse a one-line commodity directive. 
-- -- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00" -- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00 ; blah\n" commoditydirectiveonelinep :: Monad m => JournalStateParser m () commoditydirectiveonelinep = do string "commodity" lift (some spacenonewline) Amount{acommodity,astyle} <- amountp lift (many spacenonewline) _ <- followingcommentp <|> (lift eolof >> return "") let comm = Commodity{csymbol=acommodity, cformat=Just astyle} modify' (\j -> j{jcommodities=M.insert acommodity comm $ jcommodities j}) -- | Parse a multi-line commodity directive, containing 0 or more format subdirectives. -- -- >>> Right _ <- rejp commoditydirectivemultilinep "commodity $ ; blah \n format $1.00 ; blah" commoditydirectivemultilinep :: Monad m => ErroringJournalParser m () commoditydirectivemultilinep = do string "commodity" lift (some spacenonewline) sym <- lift commoditysymbolp _ <- followingcommentp <|> (lift eolof >> return "") mformat <- lastMay <$> many (indented $ formatdirectivep sym) let comm = Commodity{csymbol=sym, cformat=mformat} modify' (\j -> j{jcommodities=M.insert sym comm $ jcommodities j}) where indented = (lift (some spacenonewline) >>) -- | Parse a format (sub)directive, throwing a parse error if its -- symbol does not match the one given. formatdirectivep :: Monad m => CommoditySymbol -> ErroringJournalParser m AmountStyle formatdirectivep expectedsym = do string "format" lift (some spacenonewline) pos <- getPosition Amount{acommodity,astyle} <- amountp _ <- followingcommentp <|> (lift eolof >> return "") if acommodity==expectedsym then return astyle else parserErrorAt pos $ printf "commodity directive symbol \"%s\" and format directive symbol \"%s\" should be the same" expectedsym acommodity applyaccountdirectivep :: JournalStateParser m () applyaccountdirectivep = do string "apply" >> lift (some spacenonewline) >> string "account" lift (some spacenonewline) parent <- lift accountnamep newline pushParentAccount parent endapplyaccountdirectivep :: JournalStateParser m () endapplyaccountdirectivep = do string "end" >> lift (some spacenonewline) >> string "apply" >> lift (some spacenonewline) >> string "account" popParentAccount aliasdirectivep :: JournalStateParser m () aliasdirectivep = do string "alias" lift (some spacenonewline) alias <- lift accountaliasp addAccountAlias alias accountaliasp :: TextParser m AccountAlias accountaliasp = regexaliasp <|> basicaliasp basicaliasp :: TextParser m AccountAlias basicaliasp = do -- pdbg 0 "basicaliasp" old <- rstrip <$> (some $ noneOf ("=" :: [Char])) char '=' many spacenonewline new <- rstrip <$> anyChar `manyTill` eolof -- don't require a final newline, good for cli options return $ BasicAlias (T.pack old) (T.pack new) regexaliasp :: TextParser m AccountAlias regexaliasp = do -- pdbg 0 "regexaliasp" char '/' re <- some $ noneOf ("/\n\r" :: [Char]) -- paranoid: don't try to read past line end char '/' many spacenonewline char '=' many spacenonewline repl <- rstrip <$> anyChar `manyTill` eolof return $ RegexAlias re repl endaliasesdirectivep :: JournalStateParser m () endaliasesdirectivep = do string "end aliases" clearAccountAliases tagdirectivep :: JournalStateParser m () tagdirectivep = do string "tag" "tag directive" lift (some spacenonewline) _ <- lift $ some nonspace lift restofline return () endtagdirectivep :: JournalStateParser m () endtagdirectivep = do (string "end tag" <|> string "pop") "end tag or pop directive" lift restofline return () defaultyeardirectivep :: JournalStateParser m () 
defaultyeardirectivep = do char 'Y' "default year" lift (many spacenonewline) y <- some digitChar let y' = read y failIfInvalidYear y setYear y' defaultcommoditydirectivep :: Monad m => JournalStateParser m () defaultcommoditydirectivep = do char 'D' "default commodity" lift (some spacenonewline) Amount{..} <- amountp lift restofline setDefaultCommodityAndStyle (acommodity, astyle) marketpricedirectivep :: Monad m => JournalStateParser m MarketPrice marketpricedirectivep = do char 'P' "market price" lift (many spacenonewline) date <- try (do {LocalTime d _ <- datetimep; return d}) <|> datep -- a time is ignored lift (some spacenonewline) symbol <- lift commoditysymbolp lift (many spacenonewline) price <- amountp lift restofline return $ MarketPrice date symbol price ignoredpricecommoditydirectivep :: JournalStateParser m () ignoredpricecommoditydirectivep = do char 'N' "ignored-price commodity" lift (some spacenonewline) lift commoditysymbolp lift restofline return () commodityconversiondirectivep :: Monad m => JournalStateParser m () commodityconversiondirectivep = do char 'C' "commodity conversion" lift (some spacenonewline) amountp lift (many spacenonewline) char '=' lift (many spacenonewline) amountp lift restofline return () --- ** transactions modifiertransactionp :: MonadIO m => ErroringJournalParser m ModifierTransaction modifiertransactionp = do char '=' "modifier transaction" lift (many spacenonewline) valueexpr <- T.pack <$> lift restofline postings <- postingsp Nothing return $ ModifierTransaction valueexpr postings periodictransactionp :: MonadIO m => ErroringJournalParser m PeriodicTransaction periodictransactionp = do char '~' "periodic transaction" lift (many spacenonewline) periodexpr <- T.pack <$> lift restofline postings <- postingsp Nothing return $ PeriodicTransaction periodexpr postings -- | Parse a (possibly unbalanced) transaction. transactionp :: MonadIO m => ErroringJournalParser m Transaction transactionp = do -- ptrace "transactionp" pos <- getPosition date <- datep "transaction" edate <- optional (secondarydatep date) "secondary date" lookAhead (lift spacenonewline <|> newline) "whitespace or newline" status <- lift statusp "cleared status" code <- T.pack <$> lift codep "transaction code" description <- T.pack . 
strip <$> descriptionp comment <- try followingcommentp <|> (newline >> return "") let tags = commentTags comment postings <- postingsp (Just date) pos' <- getPosition let sourcepos = journalSourcePos pos pos' return $ txnTieKnot $ Transaction 0 sourcepos date edate status code description comment tags postings "" #ifdef TESTS test_transactionp = do let s `gives` t = do let p = parseWithState mempty transactionp s assertBool $ isRight p let Right t2 = p -- same f = assertEqual (f t) (f t2) assertEqual (tdate t) (tdate t2) assertEqual (tdate2 t) (tdate2 t2) assertEqual (tstatus t) (tstatus t2) assertEqual (tcode t) (tcode t2) assertEqual (tdescription t) (tdescription t2) assertEqual (tcomment t) (tcomment t2) assertEqual (ttags t) (ttags t2) assertEqual (tpreceding_comment_lines t) (tpreceding_comment_lines t2) assertEqual (show $ tpostings t) (show $ tpostings t2) -- "0000/01/01\n\n" `gives` nulltransaction unlines [ "2012/05/14=2012/05/15 (code) desc ; tcomment1", " ; tcomment2", " ; ttag1: val1", " * a $1.00 ; pcomment1", " ; pcomment2", " ; ptag1: val1", " ; ptag2: val2" ] `gives` nulltransaction{ tdate=parsedate "2012/05/14", tdate2=Just $ parsedate "2012/05/15", tstatus=Uncleared, tcode="code", tdescription="desc", tcomment=" tcomment1\n tcomment2\n ttag1: val1\n", ttags=[("ttag1","val1")], tpostings=[ nullposting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1], pcomment=" pcomment1\n pcomment2\n ptag1: val1\n ptag2: val2\n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")], ptransaction=Nothing } ], tpreceding_comment_lines="" } unlines [ "2015/1/1", ] `gives` nulltransaction{ tdate=parsedate "2015/01/01", } assertRight $ parseWithState mempty transactionp $ unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.18" ,"" ] -- transactionp should not parse just a date assertLeft $ parseWithState mempty transactionp "2009/1/1\n" -- transactionp should not parse just a date and description assertLeft $ parseWithState mempty transactionp "2009/1/1 a\n" -- transactionp should not parse a following comment as part of the description let p = parseWithState mempty transactionp "2009/1/1 a ;comment\n b 1\n" assertRight p assertEqual "a" (let Right p' = p in tdescription p') -- parse transaction with following whitespace line assertRight $ parseWithState mempty transactionp $ unlines ["2012/1/1" ," a 1" ," b" ," " ] let p = parseWithState mempty transactionp $ unlines ["2009/1/1 x ; transaction comment" ," a 1 ; posting 1 comment" ," ; posting 1 comment 2" ," b" ," ; posting 2 comment" ] assertRight p assertEqual 2 (let Right t = p in length $ tpostings t) #endif --- ** postings -- Parse the following whitespace-beginning lines as postings, posting -- tags, and/or comments (inferring year, if needed, from the given date). 
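-- For example (an illustrative sketch, not a verified doctest): after a
-- transaction's first line, indented lines such as
--
-- @
--     expenses:food:groceries   $47.18    ; lunch run, category: food
--     assets:checking          $-47.18
-- @
--
-- are parsed as two postings, with the trailing comment text and the
-- "category: food" tag attached to the first posting.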
postingsp :: MonadIO m => Maybe Day -> ErroringJournalParser m [Posting] postingsp mdate = many (try $ postingp mdate) "postings" -- linebeginningwithspaces :: Monad m => JournalParser m String -- linebeginningwithspaces = do -- sp <- lift (some spacenonewline) -- c <- nonspace -- cs <- lift restofline -- return $ sp ++ (c:cs) ++ "\n" postingp :: MonadIO m => Maybe Day -> ErroringJournalParser m Posting postingp mtdate = do -- pdbg 0 "postingp" lift (some spacenonewline) status <- lift statusp lift (many spacenonewline) account <- modifiedaccountnamep let (ptype, account') = (accountNamePostingType account, textUnbracket account) amount <- spaceandamountormissingp massertion <- partialbalanceassertionp _ <- fixedlotpricep lift (many spacenonewline) (comment,tags,mdate,mdate2) <- try (followingcommentandtagsp mtdate) <|> (newline >> return ("",[],Nothing,Nothing)) return posting { pdate=mdate , pdate2=mdate2 , pstatus=status , paccount=account' , pamount=amount , pcomment=comment , ptype=ptype , ptags=tags , pbalanceassertion=massertion } #ifdef TESTS test_postingp = do let s `gives` ep = do let parse = parseWithState mempty (postingp Nothing) s assertBool -- "postingp parser" $ isRight parse let Right ap = parse same f = assertEqual (f ep) (f ap) same pdate same pstatus same paccount same pamount same pcomment same ptype same ptags same ptransaction " expenses:food:dining $10.00 ; a: a a \n ; b: b b \n" `gives` posting{paccount="expenses:food:dining", pamount=Mixed [usd 10], pcomment=" a: a a \n b: b b \n", ptags=[("a","a a"), ("b","b b")]} " a 1 ; [2012/11/28]\n" `gives` ("a" `post` num 1){pcomment=" [2012/11/28]\n" ,ptags=[("date","2012/11/28")] ,pdate=parsedateM "2012/11/28"} " a 1 ; a:a, [=2012/11/28]\n" `gives` ("a" `post` num 1){pcomment=" a:a, [=2012/11/28]\n" ,ptags=[("a","a"), ("date2","2012/11/28")] ,pdate=Nothing} " a 1 ; a:a\n ; [2012/11/28=2012/11/29],b:b\n" `gives` ("a" `post` num 1){pcomment=" a:a\n [2012/11/28=2012/11/29],b:b\n" ,ptags=[("a","a"), ("date","2012/11/28"), ("date2","2012/11/29"), ("b","b")] ,pdate=parsedateM "2012/11/28"} assertBool -- "postingp parses a quoted commodity with numbers" (isRight $ parseWithState mempty (postingp Nothing) " a 1 \"DE123\"\n") -- ,"postingp parses balance assertions and fixed lot prices" ~: do assertBool (isRight $ parseWithState mempty (postingp Nothing) " a 1 \"DE123\" =$1 { =2.2 EUR} \n") -- let parse = parseWithState mempty postingp " a\n ;next-line comment\n" -- assertRight parse -- let Right p = parse -- assertEqual "next-line comment\n" (pcomment p) -- assertEqual (Just nullmixedamt) (pbalanceassertion p) #endif --- * more tests tests_Hledger_Read_JournalReader = TestList $ concat [ -- test_numberp [ "showParsedMarketPrice" ~: do let mp = parseWithState mempty marketpricedirectivep "P 2017/01/30 BTC $922.83\n" mpString = (fmap . 
fmap) showMarketPrice mp mpString `is` (Just (Right "P 2017/01/30 BTC $922.83")) ] ] {- old hunit tests tests_Hledger_Read_JournalReader = TestList $ concat [ test_numberp, test_amountp, test_spaceandamountormissingp, test_tagcomment, test_inlinecomment, test_comments, test_ledgerDateSyntaxToTags, test_postingp, test_transactionp, [ "modifiertransactionp" ~: do assertParse (parseWithState mempty modifiertransactionp "= (some value expr)\n some:postings 1\n") ,"periodictransactionp" ~: do assertParse (parseWithState mempty periodictransactionp "~ (some period expr)\n some:postings 1\n") ,"directivep" ~: do assertParse (parseWithState mempty directivep "!include /some/file.x\n") assertParse (parseWithState mempty directivep "account some:account\n") assertParse (parseWithState mempty (directivep >> directivep) "!account a\nend\n") ,"comment" ~: do assertParse (parseWithState mempty comment "; some comment \n") assertParse (parseWithState mempty comment " \t; x\n") assertParse (parseWithState mempty comment "#x") ,"datep" ~: do assertParse (parseWithState mempty datep "2011/1/1") assertParseFailure (parseWithState mempty datep "1/1") assertParse (parseWithState mempty{jpsYear=Just 2011} datep "1/1") ,"datetimep" ~: do let p = do {t <- datetimep; eof; return t} bad = assertParseFailure . parseWithState mempty p good = assertParse . parseWithState mempty p bad "2011/1/1" bad "2011/1/1 24:00:00" bad "2011/1/1 00:60:00" bad "2011/1/1 00:00:60" good "2011/1/1 00:00" good "2011/1/1 23:59:59" good "2011/1/1 3:5:7" -- timezone is parsed but ignored let startofday = LocalTime (fromGregorian 2011 1 1) (TimeOfDay 0 0 (fromIntegral 0)) assertParseEqual (parseWithState mempty p "2011/1/1 00:00-0800") startofday assertParseEqual (parseWithState mempty p "2011/1/1 00:00+1234") startofday ,"defaultyeardirectivep" ~: do assertParse (parseWithState mempty defaultyeardirectivep "Y 2010\n") assertParse (parseWithState mempty defaultyeardirectivep "Y 10001\n") ,"marketpricedirectivep" ~: assertParseEqual (parseWithState mempty marketpricedirectivep "P 2004/05/01 XYZ $55.00\n") (MarketPrice (parsedate "2004/05/01") "XYZ" $ usd 55) ,"ignoredpricecommoditydirectivep" ~: do assertParse (parseWithState mempty ignoredpricecommoditydirectivep "N $\n") ,"defaultcommoditydirectivep" ~: do assertParse (parseWithState mempty defaultcommoditydirectivep "D $1,000.0\n") ,"commodityconversiondirectivep" ~: do assertParse (parseWithState mempty commodityconversiondirectivep "C 1h = $50.00\n") ,"tagdirectivep" ~: do assertParse (parseWithState mempty tagdirectivep "tag foo \n") ,"endtagdirectivep" ~: do assertParse (parseWithState mempty endtagdirectivep "end tag \n") assertParse (parseWithState mempty endtagdirectivep "pop \n") ,"accountnamep" ~: do assertBool "accountnamep parses a normal account name" (isRight $ parsewith accountnamep "a:b:c") assertBool "accountnamep rejects an empty inner component" (isLeft $ parsewith accountnamep "a::c") assertBool "accountnamep rejects an empty leading component" (isLeft $ parsewith accountnamep ":b:c") assertBool "accountnamep rejects an empty trailing component" (isLeft $ parsewith accountnamep "a:b:") ,"leftsymbolamountp" ~: do assertParseEqual (parseWithState mempty leftsymbolamountp "$1") (usd 1 `withPrecision` 0) assertParseEqual (parseWithState mempty leftsymbolamountp "$-1") (usd (-1) `withPrecision` 0) assertParseEqual (parseWithState mempty leftsymbolamountp "-$1") (usd (-1) `withPrecision` 0) ,"amount" ~: do let -- | compare a parse result with an expected amount, showing the 
debug representation for clarity assertAmountParse parseresult amount = (either (const "parse error") showAmountDebug parseresult) ~?= (showAmountDebug amount) assertAmountParse (parseWithState mempty amountp "1 @ $2") (num 1 `withPrecision` 0 `at` (usd 2 `withPrecision` 0)) ]] -} hledger-lib-1.2/Hledger/Read/TimedotReader.hs0000644000000000000000000001001313035510426017223 0ustar0000000000000000{-| A reader for the "timedot" file format. Example: @ #DATE #ACCT DOTS # Each dot represents 15m, spaces are ignored # on 2/1, 1h was spent on FOSS haskell work, 0.25h on research, etc. 2/1 fos.haskell .... .. biz.research . inc.client1 .... .... .... .... .... .... 2/2 biz.research . inc.client1 .... .... .. @ -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Read.TimedotReader ( -- * Reader reader, -- * Misc other exports timedotfilep, -- * Tests tests_Hledger_Read_TimedotReader ) where import Prelude () import Prelude.Compat import Control.Monad import Control.Monad.Except (ExceptT) import Control.Monad.State.Strict import Data.Char (isSpace) import Data.List (foldl') import Data.Maybe import Data.Text (Text) import Test.HUnit import Text.Megaparsec hiding (parse) import Hledger.Data import Hledger.Read.Common import Hledger.Utils hiding (ptrace) -- easier to toggle this here sometimes -- import qualified Hledger.Utils (ptrace) -- ptrace = Hledger.Utils.ptrace ptrace :: Monad m => a -> m a ptrace = return reader :: Reader reader = Reader {rFormat = "timedot" ,rExtensions = ["timedot"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from the timedot format, or give an error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal timedotfilep timedotfilep :: JournalStateParser m ParsedJournal timedotfilep = do many timedotfileitemp eof get where timedotfileitemp :: JournalStateParser m () timedotfileitemp = do ptrace "timedotfileitemp" choice [ void emptyorcommentlinep ,timedotdayp >>= \ts -> modify' (addTransactions ts) ] "timedot day entry, or default year or comment line or blank line" addTransactions :: [Transaction] -> Journal -> Journal addTransactions ts j = foldl' (flip ($)) j (map addTransaction ts) -- | Parse timedot day entries to zero or more time transactions for that day. -- @ -- 2/1 -- fos.haskell .... .. -- biz.research . -- inc.client1 .... .... .... .... .... .... -- @ timedotdayp :: JournalStateParser m [Transaction] timedotdayp = do ptrace " timedotdayp" d <- datep <* lift eolof es <- catMaybes <$> many (const Nothing <$> try emptyorcommentlinep <|> Just <$> (notFollowedBy datep >> timedotentryp)) return $ map (\t -> t{tdate=d}) es -- <$> many timedotentryp -- | Parse a single timedot entry to one (dateless) transaction. -- @ -- fos.haskell .... .. 
-- @ timedotentryp :: JournalStateParser m Transaction timedotentryp = do ptrace " timedotentryp" pos <- genericSourcePos <$> getPosition lift (many spacenonewline) a <- modifiedaccountnamep lift (many spacenonewline) hours <- try (followingcommentp >> return 0) <|> (timedotdurationp <* (try followingcommentp <|> (newline >> return ""))) let t = nulltransaction{ tsourcepos = pos, tstatus = Cleared, tpostings = [ nullposting{paccount=a ,pamount=Mixed [setAmountPrecision 2 $ num hours] -- don't assume hours; do set precision to 2 ,ptype=VirtualPosting ,ptransaction=Just t } ] } return t timedotdurationp :: JournalStateParser m Quantity timedotdurationp = try timedotnumberp <|> timedotdotsp -- | Parse a duration written as a decimal number of hours (optionally followed by the letter h). -- @ -- 1.5h -- @ timedotnumberp :: JournalStateParser m Quantity timedotnumberp = do (q, _, _, _) <- lift numberp lift (many spacenonewline) optional $ char 'h' lift (many spacenonewline) return q -- | Parse a quantity written as a line of dots, each representing 0.25. -- @ -- .... .. -- @ timedotdotsp :: JournalStateParser m Quantity timedotdotsp = do dots <- filter (not.isSpace) <$> many (oneOf (". " :: [Char])) return $ (/4) $ fromIntegral $ length dots tests_Hledger_Read_TimedotReader = TestList [ ] hledger-lib-1.2/Hledger/Read/TimeclockReader.hs0000644000000000000000000001040113035510426017531 0ustar0000000000000000{-| A reader for the timeclock file format generated by timeclock.el (). Example: @ i 2007\/03\/10 12:26:00 hledger o 2007\/03\/10 17:26:02 @ From timeclock.el 2.6: @ A timeclock contains data in the form of a single entry per line. Each entry has the form: CODE YYYY/MM/DD HH:MM:SS [COMMENT] CODE is one of: b, h, i, o or O. COMMENT is optional when the code is i, o or O. The meanings of the codes are: b Set the current time balance, or \"time debt\". Useful when archiving old log data, when a debt must be carried forward. The COMMENT here is the number of seconds of debt. h Set the required working time for the given day. This must be the first entry for that day. The COMMENT in this case is the number of hours in this workday. Floating point amounts are allowed. i Clock in. The COMMENT in this case should be the name of the project worked on. o Clock out. COMMENT is unnecessary, but can be used to provide a description of how the period went, for example. O Final clock out. Whatever project was being worked on, it is now finished. Useful for creating summary reports. @ -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Read.TimeclockReader ( -- * Reader reader, -- * Misc other exports timeclockfilep, -- * Tests tests_Hledger_Read_TimeclockReader ) where import Prelude () import Prelude.Compat import Control.Monad import Control.Monad.Except (ExceptT) import Control.Monad.State.Strict import Data.Maybe (fromMaybe) import Data.Text (Text) import qualified Data.Text as T import Test.HUnit import Text.Megaparsec hiding (parse) import Hledger.Data -- XXX too much reuse ? import Hledger.Read.Common import Hledger.Utils reader :: Reader reader = Reader {rFormat = "timeclock" ,rExtensions = ["timeclock"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from timeclock.el's timeclock -- format, saving the provided file path and the current time, or give an -- error. 
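-- For example (an illustrative sketch): a clock-in/clock-out pair such as
--
-- @
-- i 2007/03/10 12:26:00 hledger
-- o 2007/03/10 17:26:02
-- @
--
-- is converted while parsing into a single transaction dated 2007/03/10,
-- posting the session's duration (roughly 5 hours) to the account "hledger";
-- a session still left open is closed at the current time.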
parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal timeclockfilep timeclockfilep :: ErroringJournalParser IO ParsedJournal timeclockfilep = do many timeclockitemp eof j@Journal{jparsetimeclockentries=es} <- get -- Convert timeclock entries in this journal to transactions, closing any unfinished sessions. -- Doing this here rather than in journalFinalise means timeclock sessions can't span file boundaries, -- but it simplifies code above. now <- liftIO getCurrentLocalTime -- entries have been parsed in reverse order. timeclockEntriesToTransactions -- expects them to be in normal order, then we must reverse again since -- journalFinalise expects them in reverse order let j' = j{jtxns = reverse $ timeclockEntriesToTransactions now $ reverse es, jparsetimeclockentries = []} return j' where -- As all ledger line types can be distinguished by the first -- character, excepting transactions versus empty (blank or -- comment-only) lines, can use choice w/o try timeclockitemp = choice [ void emptyorcommentlinep , timeclockentryp >>= \e -> modify' (\j -> j{jparsetimeclockentries = e : jparsetimeclockentries j}) ] "timeclock entry, or default year or historical price directive" -- | Parse a timeclock entry. timeclockentryp :: JournalStateParser m TimeclockEntry timeclockentryp = do sourcepos <- genericSourcePos <$> lift getPosition code <- oneOf ("bhioO" :: [Char]) lift (some spacenonewline) datetime <- datetimep account <- fromMaybe "" <$> optional (lift (some spacenonewline) >> modifiedaccountnamep) description <- T.pack . fromMaybe "" <$> lift (optional (some spacenonewline >> restofline)) return $ TimeclockEntry sourcepos (read [code]) datetime account description tests_Hledger_Read_TimeclockReader = TestList [ ] hledger-lib-1.2/Hledger/Reports.hs0000644000000000000000000000234313035210046015260 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Generate several common kinds of report from a journal, as \"*Report\" - simple intermediate data structures intended to be easily rendered as text, html, json, csv etc. by hledger commands, hamlet templates, javascript, or whatever. -} module Hledger.Reports ( module Hledger.Reports.ReportOptions, module Hledger.Reports.EntriesReport, module Hledger.Reports.PostingsReport, module Hledger.Reports.TransactionsReports, module Hledger.Reports.BalanceReport, module Hledger.Reports.MultiBalanceReports, -- module Hledger.Reports.BalanceHistoryReport, -- * Tests tests_Hledger_Reports ) where import Test.HUnit import Hledger.Reports.ReportOptions import Hledger.Reports.EntriesReport import Hledger.Reports.PostingsReport import Hledger.Reports.TransactionsReports import Hledger.Reports.BalanceReport import Hledger.Reports.MultiBalanceReports -- import Hledger.Reports.BalanceHistoryReport tests_Hledger_Reports :: Test tests_Hledger_Reports = TestList $ -- ++ tests_isInterestingIndented [ tests_Hledger_Reports_ReportOptions, tests_Hledger_Reports_EntriesReport, tests_Hledger_Reports_PostingsReport, tests_Hledger_Reports_BalanceReport ] hledger-lib-1.2/Hledger/Reports/ReportOptions.hs0000644000000000000000000003550513066774455020144 0ustar0000000000000000{-# LANGUAGE CPP, RecordWildCards, DeriveDataTypeable #-} {-| Options common to most hledger reports. 
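As an illustrative sketch (not part of the original documentation): report
options are usually derived from command-line options via rawOptsToReportOpts,
but can also be built directly from the defaults with record update syntax,
eg for a monthly, tree-mode report restricted to expense accounts:

@
myReportOpts :: ReportOpts
myReportOpts = defreportopts{ interval_        = Months 1
                            , accountlistmode_ = ALTree
                            , query_           = "expenses"
                            }
@

(myReportOpts is a hypothetical name used only for this example.)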
-} module Hledger.Reports.ReportOptions ( ReportOpts(..), BalanceType(..), AccountListMode(..), FormatStr, defreportopts, rawOptsToReportOpts, checkReportOpts, flat_, tree_, whichDateFromOpts, journalSelectingAmountFromOpts, queryFromOpts, queryFromOptsOnly, queryOptsFromOpts, transactionDateFn, postingDateFn, reportStartDate, reportEndDate, reportStartEndDates, tests_Hledger_Reports_ReportOptions ) where import Data.Data (Data) #if !MIN_VERSION_base(4,8,0) import Data.Functor.Compat ((<$>)) #endif import Data.Maybe import qualified Data.Text as T import Data.Typeable (Typeable) import Data.Time.Calendar import Data.Default import Safe import Test.HUnit import Text.Megaparsec.Error import Hledger.Data import Hledger.Query import Hledger.Utils type FormatStr = String -- | Which "balance" is being shown in a balance report. data BalanceType = PeriodChange -- ^ The change of balance in each period. | CumulativeChange -- ^ The accumulated change across multiple periods. | HistoricalBalance -- ^ The historical ending balance, including the effect of -- all postings before the report period. Unless altered by, -- a query, this is what you would see on a bank statement. deriving (Eq,Show,Data,Typeable) instance Default BalanceType where def = PeriodChange -- | Should accounts be displayed: in the command's default style, hierarchically, or as a flat list ? data AccountListMode = ALDefault | ALTree | ALFlat deriving (Eq, Show, Data, Typeable) instance Default AccountListMode where def = ALDefault -- | Standard options for customising report filtering and output, -- corresponding to hledger's command-line options and query language -- arguments. Used in hledger-lib and above. data ReportOpts = ReportOpts { period_ :: Period ,interval_ :: Interval ,clearedstatus_ :: Maybe ClearedStatus ,cost_ :: Bool ,depth_ :: Maybe Int ,display_ :: Maybe DisplayExp ,date2_ :: Bool ,empty_ :: Bool ,no_elide_ :: Bool ,real_ :: Bool ,format_ :: Maybe FormatStr ,query_ :: String -- all arguments, as a string -- register only ,average_ :: Bool ,related_ :: Bool -- balance only ,balancetype_ :: BalanceType ,accountlistmode_ :: AccountListMode ,drop_ :: Int ,row_total_ :: Bool ,no_total_ :: Bool ,value_ :: Bool ,pretty_tables_ :: Bool } deriving (Show, Data, Typeable) instance Default ReportOpts where def = defreportopts instance Default Bool where def = False defreportopts :: ReportOpts defreportopts = ReportOpts def def def def def def def def def def def def def def def def def def def def def rawOptsToReportOpts :: RawOpts -> IO ReportOpts rawOptsToReportOpts rawopts = checkReportOpts <$> do d <- getCurrentDay let rawopts' = checkRawOpts rawopts return defreportopts{ period_ = periodFromRawOpts d rawopts' ,interval_ = intervalFromRawOpts rawopts' ,clearedstatus_ = clearedStatusFromRawOpts rawopts' ,cost_ = boolopt "cost" rawopts' ,depth_ = maybeintopt "depth" rawopts' ,display_ = maybedisplayopt d rawopts' ,date2_ = boolopt "date2" rawopts' ,empty_ = boolopt "empty" rawopts' ,no_elide_ = boolopt "no-elide" rawopts' ,real_ = boolopt "real" rawopts' ,format_ = maybestringopt "format" rawopts' -- XXX move to CliOpts or move validation from Cli.CliOptions to here ,query_ = unwords $ listofstringopt "args" rawopts' -- doesn't handle an arg like "" right ,average_ = boolopt "average" rawopts' ,related_ = boolopt "related" rawopts' ,balancetype_ = balancetypeopt rawopts' ,accountlistmode_ = accountlistmodeopt rawopts' ,drop_ = intopt "drop" rawopts' ,row_total_ = boolopt "row-total" rawopts' ,no_total_ = boolopt "no-total" 
rawopts' ,value_ = boolopt "value" rawopts' ,pretty_tables_ = boolopt "pretty-tables" rawopts' } -- | Do extra validation of raw option values, raising an error if there's a problem. checkRawOpts :: RawOpts -> RawOpts checkRawOpts rawopts -- our standard behaviour is to accept conflicting options actually, -- using the last one - more forgiving for overriding command-line aliases -- | countopts ["change","cumulative","historical"] > 1 -- = usageError "please specify at most one of --change, --cumulative, --historical" -- | countopts ["flat","tree"] > 1 -- = usageError "please specify at most one of --flat, --tree" -- | countopts ["daily","weekly","monthly","quarterly","yearly"] > 1 -- = usageError "please specify at most one of --daily, " | otherwise = rawopts -- where -- countopts = length . filter (`boolopt` rawopts) -- | Do extra validation of report options, raising an error if there's a problem. checkReportOpts :: ReportOpts -> ReportOpts checkReportOpts ropts@ReportOpts{..} = either usageError (const ropts) $ do case depth_ of Just d | d < 0 -> Left "--depth should have a positive number" _ -> Right () accountlistmodeopt :: RawOpts -> AccountListMode accountlistmodeopt rawopts = case reverse $ filter (`elem` ["tree","flat"]) $ map fst rawopts of ("tree":_) -> ALTree ("flat":_) -> ALFlat _ -> ALDefault balancetypeopt :: RawOpts -> BalanceType balancetypeopt rawopts = case reverse $ filter (`elem` ["change","cumulative","historical"]) $ map fst rawopts of ("historical":_) -> HistoricalBalance ("cumulative":_) -> CumulativeChange _ -> PeriodChange -- Get the period specified by the intersection of -b/--begin, -e/--end and/or -- -p/--period options, using the given date to interpret relative date expressions. periodFromRawOpts :: Day -> RawOpts -> Period periodFromRawOpts d rawopts = case (mearliestb, mlateste) of (Nothing, Nothing) -> PeriodAll (Just b, Nothing) -> PeriodFrom b (Nothing, Just e) -> PeriodTo e (Just b, Just e) -> simplifyPeriod $ PeriodBetween b e where mearliestb = case beginDatesFromRawOpts d rawopts of [] -> Nothing bs -> Just $ minimum bs mlateste = case endDatesFromRawOpts d rawopts of [] -> Nothing es -> Just $ maximum es -- Get all begin dates specified by -b/--begin or -p/--period options, in order, -- using the given date to interpret relative date expressions. beginDatesFromRawOpts :: Day -> RawOpts -> [Day] beginDatesFromRawOpts d = catMaybes . map (begindatefromrawopt d) where begindatefromrawopt d (n,v) | n == "begin" = either (\e -> usageError $ "could not parse "++n++" date: "++parseErrorPretty e) Just $ fixSmartDateStrEither' d (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) id $ parsePeriodExpr d (stripquotes $ T.pack v) of (_, DateSpan (Just b) _) -> Just b _ -> Nothing | otherwise = Nothing -- Get all end dates specified by -e/--end or -p/--period options, in order, -- using the given date to interpret relative date expressions. endDatesFromRawOpts :: Day -> RawOpts -> [Day] endDatesFromRawOpts d = catMaybes . 
map (enddatefromrawopt d) where enddatefromrawopt d (n,v) | n == "end" = either (\e -> usageError $ "could not parse "++n++" date: "++parseErrorPretty e) Just $ fixSmartDateStrEither' d (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) id $ parsePeriodExpr d (stripquotes $ T.pack v) of (_, DateSpan _ (Just e)) -> Just e _ -> Nothing | otherwise = Nothing -- | Get the report interval, if any, specified by the last of -p/--period, -- -D/--daily, -W/--weekly, -M/--monthly etc. options. intervalFromRawOpts :: RawOpts -> Interval intervalFromRawOpts = lastDef NoInterval . catMaybes . map intervalfromrawopt where intervalfromrawopt (n,v) | n == "period" = either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) (Just . fst) $ parsePeriodExpr nulldate (stripquotes $ T.pack v) -- reference date does not affect the interval | n == "daily" = Just $ Days 1 | n == "weekly" = Just $ Weeks 1 | n == "monthly" = Just $ Months 1 | n == "quarterly" = Just $ Quarters 1 | n == "yearly" = Just $ Years 1 | otherwise = Nothing -- | Get the cleared status, if any, specified by the last of -C/--cleared, -- --pending, -U/--uncleared options. clearedStatusFromRawOpts :: RawOpts -> Maybe ClearedStatus clearedStatusFromRawOpts = lastMay . catMaybes . map clearedstatusfromrawopt where clearedstatusfromrawopt (n,_) | n == "cleared" = Just Cleared | n == "pending" = Just Pending | n == "uncleared" = Just Uncleared | otherwise = Nothing type DisplayExp = String maybedisplayopt :: Day -> RawOpts -> Maybe DisplayExp maybedisplayopt d rawopts = maybe Nothing (Just . regexReplaceBy "\\[.+?\\]" fixbracketeddatestr) $ maybestringopt "display" rawopts where fixbracketeddatestr "" = "" fixbracketeddatestr s = "[" ++ fixSmartDateStr d (T.pack $ init $ tail s) ++ "]" -- | Select the Transaction date accessor based on --date2. transactionDateFn :: ReportOpts -> (Transaction -> Day) transactionDateFn ReportOpts{..} = if date2_ then transactionDate2 else tdate -- | Select the Posting date accessor based on --date2. postingDateFn :: ReportOpts -> (Posting -> Day) postingDateFn ReportOpts{..} = if date2_ then postingDate2 else postingDate -- | Report which date we will report on based on --date2. whichDateFromOpts :: ReportOpts -> WhichDate whichDateFromOpts ReportOpts{..} = if date2_ then SecondaryDate else PrimaryDate -- | Legacy-compatible convenience aliases for accountlistmode_. tree_ :: ReportOpts -> Bool tree_ = (==ALTree) . accountlistmode_ flat_ :: ReportOpts -> Bool flat_ = (==ALFlat) . accountlistmode_ -- depthFromOpts :: ReportOpts -> Int -- depthFromOpts opts = min (fromMaybe 99999 $ depth_ opts) (queryDepth $ queryFromOpts nulldate opts) -- | Convert this journal's postings' amounts to the cost basis amounts if -- specified by options. journalSelectingAmountFromOpts :: ReportOpts -> Journal -> Journal journalSelectingAmountFromOpts opts | cost_ opts = journalConvertAmountsToCost | otherwise = id -- | Convert report options and arguments to a query. queryFromOpts :: Day -> ReportOpts -> Query queryFromOpts d ReportOpts{..} = simplifyQuery $ And $ [flagsq, argsq] where flagsq = And $ [(if date2_ then Date2 else Date) $ periodAsDateSpan period_] ++ (if real_ then [Real True] else []) ++ (if empty_ then [Empty True] else []) -- ? ++ (maybe [] ((:[]) . Status) clearedstatus_) ++ (maybe [] ((:[]) . Depth) depth_) argsq = fst $ parseQuery d (T.pack query_) -- | Convert report options to a query, ignoring any non-flag command line arguments. 
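-- For example (an illustrative sketch; compare the queryFromOpts tests below):
--
-- @
-- let ropts = defreportopts{ query_ = "expenses", real_ = True }
-- queryFromOpts     nulldate ropts   -- roughly And [Real True, Acct "expenses"]
-- queryFromOptsOnly nulldate ropts   -- roughly Real True; query_ is ignored
-- @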
queryFromOptsOnly :: Day -> ReportOpts -> Query queryFromOptsOnly _d ReportOpts{..} = simplifyQuery flagsq where flagsq = And $ [(if date2_ then Date2 else Date) $ periodAsDateSpan period_] ++ (if real_ then [Real True] else []) ++ (if empty_ then [Empty True] else []) -- ? ++ (maybe [] ((:[]) . Status) clearedstatus_) ++ (maybe [] ((:[]) . Depth) depth_) tests_queryFromOpts :: [Test] tests_queryFromOpts = [ "queryFromOpts" ~: do assertEqual "" Any (queryFromOpts nulldate defreportopts) assertEqual "" (Acct "a") (queryFromOpts nulldate defreportopts{query_="a"}) assertEqual "" (Desc "a a") (queryFromOpts nulldate defreportopts{query_="desc:'a a'"}) assertEqual "" (Date $ mkdatespan "2012/01/01" "2013/01/01") (queryFromOpts nulldate defreportopts{period_=PeriodFrom (parsedate "2012/01/01") ,query_="date:'to 2013'" }) assertEqual "" (Date2 $ mkdatespan "2012/01/01" "2013/01/01") (queryFromOpts nulldate defreportopts{query_="date2:'in 2012'"}) assertEqual "" (Or [Acct "a a", Acct "'b"]) (queryFromOpts nulldate defreportopts{query_="'a a' 'b"}) ] -- | Convert report options and arguments to query options. queryOptsFromOpts :: Day -> ReportOpts -> [QueryOpt] queryOptsFromOpts d ReportOpts{..} = flagsqopts ++ argsqopts where flagsqopts = [] argsqopts = snd $ parseQuery d (T.pack query_) tests_queryOptsFromOpts :: [Test] tests_queryOptsFromOpts = [ "queryOptsFromOpts" ~: do assertEqual "" [] (queryOptsFromOpts nulldate defreportopts) assertEqual "" [] (queryOptsFromOpts nulldate defreportopts{query_="a"}) assertEqual "" [] (queryOptsFromOpts nulldate defreportopts{period_=PeriodFrom (parsedate "2012/01/01") ,query_="date:'to 2013'" }) ] -- | The effective report start date is the one specified by options or queries, -- otherwise the earliest transaction or posting date in the journal, -- otherwise (for an empty journal) nothing. -- Needs IO to parse smart dates in options/queries. reportStartDate :: Journal -> ReportOpts -> IO (Maybe Day) reportStartDate j ropts = (fst <$>) <$> reportStartEndDates j ropts -- | The effective report end date is the one specified by options or queries, -- otherwise the latest transaction or posting date in the journal, -- otherwise (for an empty journal) nothing. -- Needs IO to parse smart dates in options/queries. reportEndDate :: Journal -> ReportOpts -> IO (Maybe Day) reportEndDate j ropts = (snd <$>) <$> reportStartEndDates j ropts reportStartEndDates :: Journal -> ReportOpts -> IO (Maybe (Day,Day)) reportStartEndDates j ropts = do today <- getCurrentDay let q = queryFromOpts today ropts mrequestedstartdate = queryStartDate False q mrequestedenddate = queryEndDate False q return $ case journalDateSpan False j of -- don't bother with secondary dates DateSpan (Just journalstartdate) (Just journalenddate) -> Just (fromMaybe journalstartdate mrequestedstartdate, fromMaybe journalenddate mrequestedenddate) _ -> Nothing tests_Hledger_Reports_ReportOptions :: Test tests_Hledger_Reports_ReportOptions = TestList $ tests_queryFromOpts ++ tests_queryOptsFromOpts hledger-lib-1.2/Hledger/Reports/BalanceHistoryReport.hs0000644000000000000000000000173513035210046021367 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Account balance history report. 
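For example (an illustrative sketch with made-up amounts): for an
assets:checking account posted to on two dates, accountBalanceHistory
produces (posting date, running inclusive balance) pairs like

@
[ (2017-01-01, $100.00)
, (2017-02-01, $150.00)
]
@

one element per matched transaction, from earliest to latest.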
-} -- XXX not used module Hledger.Reports.BalanceHistoryReport ( accountBalanceHistory -- -- * Tests -- tests_Hledger_Reports_BalanceReport ) where import Data.Time.Calendar -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions import Hledger.Reports.TransactionsReports -- | Get the historical running inclusive balance of a particular account, -- from earliest to latest posting date. accountBalanceHistory :: ReportOpts -> Journal -> Account -> [(Day, MixedAmount)] accountBalanceHistory ropts j a = [(getdate t, bal) | (t,_,_,_,_,bal) <- items] where (_,items) = journalTransactionsReport ropts j acctquery inclusivebal = True acctquery = Acct $ (if inclusivebal then accountNameToAccountRegex else accountNameToAccountOnlyRegex) $ aname a getdate = if date2_ ropts then transactionDate2 else tdate hledger-lib-1.2/Hledger/Reports/BalanceReport.hs0000644000000000000000000003664313067565120020026 0ustar0000000000000000{-| Balance report, used by the balance command. -} {-# LANGUAGE FlexibleInstances, ScopedTypeVariables, OverloadedStrings #-} module Hledger.Reports.BalanceReport ( BalanceReport, BalanceReportItem, balanceReport, balanceReportValue, mixedAmountValue, amountValue, flatShowsExclusiveBalance, -- * Tests tests_Hledger_Reports_BalanceReport ) where import Data.List import Data.Ord import Data.Maybe import Data.Time.Calendar import Test.HUnit import qualified Data.Text as T import Hledger.Data import Hledger.Read (mamountp') import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions -- | A simple single-column balance report. It has: -- -- 1. a list of items, one per account, each containing: -- -- * the full account name -- -- * the Ledger-style elided short account name -- (the leaf account name, prefixed by any boring parents immediately above); -- or with --flat, the full account name again -- -- * the number of indentation steps for rendering a Ledger-style account tree, -- taking into account elided boring parents, --no-elide and --flat -- -- * an amount -- -- 2. the total of all amounts -- type BalanceReport = ([BalanceReportItem], MixedAmount) type BalanceReportItem = (AccountName, AccountName, Int, MixedAmount) -- | When true (the default), this makes balance --flat reports and their implementation clearer. -- Single/multi-col balance reports currently aren't all correct if this is false. flatShowsExclusiveBalance = True -- | Enabling this makes balance --flat --empty also show parent accounts without postings, -- in addition to those with postings and a zero balance. Disabling it shows only the latter. -- No longer supported, but leave this here for a bit. -- flatShowsPostinglessAccounts = True -- | Generate a simple balance report, containing the matched accounts and -- their balances (change of balance) during the specified period. -- This is like PeriodChangeReport with a single column (but more mature, -- eg this can do hierarchical display). 
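-- For example (an illustrative sketch, abbreviated from the tests below):
-- with default options, the sample journal gives a report like
--
-- @
-- ( [ ("assets",             "assets",      0, $-1.00)
--   , ("assets:bank:saving", "bank:saving", 1, $1.00)
--   , ("assets:cash",        "cash",        1, $-2.00)
--   , ...
--   ]
-- , $0.00 )                  -- the grand total
-- @
--
-- where each item is (full account name, elided display name, indent, balance).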
balanceReport :: ReportOpts -> Query -> Journal -> BalanceReport balanceReport opts q j = (items, total) where -- dbg1 = const id -- exclude from debug output dbg1 s = let p = "balanceReport" in Hledger.Utils.dbg1 (p++" "++s) -- add prefix in debug output accts = ledgerRootAccount $ ledgerFromJournal q $ journalSelectingAmountFromOpts opts j accts' :: [Account] | queryDepth q == 0 = dbg1 "accts" $ take 1 $ clipAccountsAndAggregate (queryDepth q) $ flattenAccounts accts | flat_ opts = dbg1 "accts" $ filterzeros $ filterempty $ drop 1 $ clipAccountsAndAggregate (queryDepth q) $ flattenAccounts accts | otherwise = dbg1 "accts" $ filter (not.aboring) $ drop 1 $ flattenAccounts $ markboring $ prunezeros $ clipAccounts (queryDepth q) accts where balance = if flat_ opts then aebalance else aibalance filterzeros = if empty_ opts then id else filter (not . isZeroMixedAmount . balance) filterempty = filter (\a -> anumpostings a > 0 || not (isZeroMixedAmount (balance a))) prunezeros = if empty_ opts then id else fromMaybe nullacct . pruneAccounts (isZeroMixedAmount . balance) markboring = if no_elide_ opts then id else markBoringParentAccounts items = dbg1 "items" $ map (balanceReportItem opts q) accts' total | not (flat_ opts) = dbg1 "total" $ sum [amt | (_,_,indent,amt) <- items, indent == 0] | otherwise = dbg1 "total" $ if flatShowsExclusiveBalance then sum $ map fourth4 items else sum $ map aebalance $ clipAccountsAndAggregate 1 accts' -- | In an account tree with zero-balance leaves removed, mark the -- elidable parent accounts (those with one subaccount and no balance -- of their own). markBoringParentAccounts :: Account -> Account markBoringParentAccounts = tieAccountParents . mapAccounts mark where mark a | length (asubs a) == 1 && isZeroMixedAmount (aebalance a) = a{aboring=True} | otherwise = a balanceReportItem :: ReportOpts -> Query -> Account -> BalanceReportItem balanceReportItem opts q a | flat_ opts = (name, name, 0, (if flatShowsExclusiveBalance then aebalance else aibalance) a) | otherwise = (name, elidedname, indent, aibalance a) where name | queryDepth q > 0 = aname a | otherwise = "..." elidedname = accountNameFromComponents (adjacentboringparentnames ++ [accountLeafName name]) adjacentboringparentnames = reverse $ map (accountLeafName.aname) $ takeWhile aboring parents indent = length $ filter (not.aboring) parents -- parents exclude the tree's root node parents = case parentAccounts a of [] -> [] as -> init as -- -- the above using the newer multi balance report code: -- balanceReport' opts q j = (items, total) -- where -- MultiBalanceReport (_,mbrrows,mbrtotals) = PeriodChangeReport opts q j -- items = [(a,a',n, headDef 0 bs) | ((a,a',n), bs) <- mbrrows] -- total = headDef 0 mbrtotals -- | Convert all the amounts in a single-column balance report to -- their value on the given date in their default valuation -- commodities. balanceReportValue :: Journal -> Day -> BalanceReport -> BalanceReport balanceReportValue j d r = r' where (items,total) = r r' = dbg8 "known market prices" (jmarketprices j) `seq` dbg8 "report end date" d `seq` dbg8 "balanceReportValue" ([(n, n', i, mixedAmountValue j d a) |(n,n',i,a) <- items], mixedAmountValue j d total) mixedAmountValue :: Journal -> Day -> MixedAmount -> MixedAmount mixedAmountValue j d (Mixed as) = Mixed $ map (amountValue j d) as -- | Find the market value of this amount on the given date, in it's -- default valuation commodity, based on recorded market prices. 
-- If no default valuation commodity can be found, the amount is left -- unchanged. amountValue :: Journal -> Day -> Amount -> Amount amountValue j d a = case commodityValue j d (acommodity a) of Just v -> v{aquantity=aquantity v * aquantity a ,aprice=aprice a } Nothing -> a -- | Find the market value, if known, of one unit of this commodity (A) on -- the given valuation date, in the commodity (B) mentioned in the latest -- applicable market price. The latest applicable market price is the market -- price directive for commodity A with the latest date that is on or before -- the valuation date; or if there are multiple such prices with the same date, -- the last parsed. commodityValue :: Journal -> Day -> CommoditySymbol -> Maybe Amount commodityValue j valuationdate c | null applicableprices = dbg Nothing | otherwise = dbg $ Just $ mpamount $ last applicableprices where dbg = dbg8 ("using market price for "++T.unpack c) applicableprices = [p | p <- sortBy (comparing mpdate) $ jmarketprices j , mpcommodity p == c , mpdate p <= valuationdate ] tests_balanceReport = let (opts,journal) `gives` r = do let (eitems, etotal) = r (aitems, atotal) = balanceReport opts (queryFromOpts nulldate opts) journal showw (acct,acct',indent,amt) = (acct, acct', indent, showMixedAmountDebug amt) assertEqual "items" (map showw eitems) (map showw aitems) assertEqual "total" (showMixedAmountDebug etotal) (showMixedAmountDebug atotal) usd0 = usd 0 in [ "balanceReport with no args on null journal" ~: do (defreportopts, nulljournal) `gives` ([], Mixed [nullamt]) ,"balanceReport with no args on sample journal" ~: do (defreportopts, samplejournal) `gives` ([ ("assets","assets",0, mamountp' "$-1.00") ,("assets:bank:saving","bank:saving",1, mamountp' "$1.00") ,("assets:cash","cash",1, mamountp' "$-2.00") ,("expenses","expenses",0, mamountp' "$2.00") ,("expenses:food","food",1, mamountp' "$1.00") ,("expenses:supplies","supplies",1, mamountp' "$1.00") ,("income","income",0, mamountp' "$-2.00") ,("income:gifts","gifts",1, mamountp' "$-1.00") ,("income:salary","salary",1, mamountp' "$-1.00") ,("liabilities:debts","liabilities:debts",0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with --depth=N" ~: do (defreportopts{depth_=Just 1}, samplejournal) `gives` ([ ("assets", "assets", 0, mamountp' "$-1.00") ,("expenses", "expenses", 0, mamountp' "$2.00") ,("income", "income", 0, mamountp' "$-2.00") ,("liabilities", "liabilities", 0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with depth:N" ~: do (defreportopts{query_="depth:1"}, samplejournal) `gives` ([ ("assets", "assets", 0, mamountp' "$-1.00") ,("expenses", "expenses", 0, mamountp' "$2.00") ,("income", "income", 0, mamountp' "$-2.00") ,("liabilities", "liabilities", 0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with a date or secondary date span" ~: do (defreportopts{query_="date:'in 2009'"}, samplejournal2) `gives` ([], Mixed [nullamt]) (defreportopts{query_="date2:'in 2009'"}, samplejournal2) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mamountp' "$1.00") ,("income:salary","income:salary",0,mamountp' "$-1.00") ], Mixed [usd0]) ,"balanceReport with desc:" ~: do (defreportopts{query_="desc:income"}, samplejournal) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mamountp' "$1.00") ,("income:salary","income:salary",0, mamountp' "$-1.00") ], Mixed [usd0]) ,"balanceReport with not:desc:" ~: do (defreportopts{query_="not:desc:income"}, samplejournal) `gives` ([ ("assets","assets",0, mamountp' "$-2.00") ,("assets:bank","bank",1, 
Mixed [usd0]) ,("assets:bank:checking","checking",2,mamountp' "$-1.00") ,("assets:bank:saving","saving",2, mamountp' "$1.00") ,("assets:cash","cash",1, mamountp' "$-2.00") ,("expenses","expenses",0, mamountp' "$2.00") ,("expenses:food","food",1, mamountp' "$1.00") ,("expenses:supplies","supplies",1, mamountp' "$1.00") ,("income:gifts","income:gifts",0, mamountp' "$-1.00") ,("liabilities:debts","liabilities:debts",0, mamountp' "$1.00") ], Mixed [usd0]) {- ,"accounts report with account pattern o" ~: defreportopts{patterns_=["o"]} `gives` [" $1 expenses:food" ," $-2 income" ," $-1 gifts" ," $-1 salary" ,"--------------------" ," $-1" ] ,"accounts report with account pattern o and --depth 1" ~: defreportopts{patterns_=["o"],depth_=Just 1} `gives` [" $1 expenses" ," $-2 income" ,"--------------------" ," $-1" ] ,"accounts report with account pattern a" ~: defreportopts{patterns_=["a"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $-1 income:salary" ," $1 liabilities:debts" ,"--------------------" ," $-1" ] ,"accounts report with account pattern e" ~: defreportopts{patterns_=["e"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," 0" ] ,"accounts report with unmatched parent of two matched subaccounts" ~: defreportopts{patterns_=["cash","saving"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,"accounts report with multi-part account name" ~: defreportopts{patterns_=["expenses:food"]} `gives` [" $1 expenses:food" ,"--------------------" ," $1" ] ,"accounts report with negative account pattern" ~: defreportopts{patterns_=["not:assets"]} `gives` [" $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," $1" ] ,"accounts report negative account pattern always matches full name" ~: defreportopts{patterns_=["not:e"]} `gives` ["--------------------" ," 0" ] ,"accounts report negative patterns affect totals" ~: defreportopts{patterns_=["expenses","not:food"]} `gives` [" $1 expenses:supplies" ,"--------------------" ," $1" ] ,"accounts report with -E shows zero-balance accounts" ~: defreportopts{patterns_=["assets"],empty_=True} `gives` [" $-1 assets" ," $1 bank" ," 0 checking" ," $1 saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,"accounts report with cost basis" ~: do j <- (readJournal Nothing Nothing Nothing $ unlines ["" ,"2008/1/1 test " ," a:b 10h @ $50" ," c:d " ]) >>= either error' return let j' = journalCanonicaliseAmounts $ journalConvertAmountsToCost j -- enable cost basis adjustment balanceReportAsText defreportopts (balanceReport defreportopts Any j') `is` [" $500 a:b" ," $-500 c:d" ,"--------------------" ," 0" ] -} ] Right samplejournal2 = journalBalanceTransactions False nulljournal{ jtxns = [ txnTieKnot Transaction{ tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/01/01", tdate2=Just $ parsedate "2009/01/01", tstatus=Uncleared, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= [posting {paccount="assets:bank:checking", pamount=Mixed [usd 1]} ,posting {paccount="income:salary", pamount=missingmixedamt} ], tpreceding_comment_lines="" } ] } -- tests_isInterestingIndented = [ -- "isInterestingIndented" ~: do -- let (opts, journal, acctname) `gives` r = isInterestingIndented opts l acctname `is` r -- where l = ledgerFromJournal (queryFromOpts nulldate opts) journal -- 
(defreportopts, samplejournal, "expenses") `gives` True -- ] tests_Hledger_Reports_BalanceReport :: Test tests_Hledger_Reports_BalanceReport = TestList tests_balanceReport hledger-lib-1.2/Hledger/Reports/EntriesReport.hs0000644000000000000000000000263613035210046020072 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Journal entries report, used by the print command. -} module Hledger.Reports.EntriesReport ( EntriesReport, EntriesReportItem, entriesReport, -- * Tests tests_Hledger_Reports_EntriesReport ) where import Data.List import Data.Ord import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions -- | A journal entries report is a list of whole transactions as -- originally entered in the journal (mostly). This is used by eg -- hledger's print command and hledger-web's journal entries view. type EntriesReport = [EntriesReportItem] type EntriesReportItem = Transaction -- | Select transactions for an entries report. entriesReport :: ReportOpts -> Query -> Journal -> EntriesReport entriesReport opts q j = sortBy (comparing date) $ filter (q `matchesTransaction`) ts where date = transactionDateFn opts ts = jtxns $ journalSelectingAmountFromOpts opts j tests_entriesReport :: [Test] tests_entriesReport = [ "entriesReport" ~: do assertEqual "not acct" 1 (length $ entriesReport defreportopts (Not $ Acct "bank") samplejournal) let sp = mkdatespan "2008/06/01" "2008/07/01" assertEqual "date" 3 (length $ entriesReport defreportopts (Date sp) samplejournal) ] tests_Hledger_Reports_EntriesReport :: Test tests_Hledger_Reports_EntriesReport = TestList $ tests_entriesReport hledger-lib-1.2/Hledger/Reports/MultiBalanceReports.hs0000644000000000000000000002371013067565677021234 0ustar0000000000000000{-# LANGUAGE FlexibleInstances, ScopedTypeVariables #-} {-| Multi-column balance reports, used by the balance command. -} module Hledger.Reports.MultiBalanceReports ( MultiBalanceReport(..), MultiBalanceReportRow, multiBalanceReport, multiBalanceReportValue, singleBalanceReport -- -- * Tests -- tests_Hledger_Reports_MultiBalanceReport ) where import Data.List import Data.Maybe import Data.Ord import Data.Time.Calendar import Safe -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions import Hledger.Reports.BalanceReport -- | A multi balance report is a balance report with one or more columns. It has: -- -- 1. a list of each column's period (date span) -- -- 2. a list of row items, each containing: -- -- * the full account name -- -- * the leaf account name -- -- * the account's depth -- -- * the amounts to show in each column -- -- * the total of the row's amounts -- -- * the average of the row's amounts -- -- 3. the column totals and the overall total and average -- -- The meaning of the amounts depends on the type of multi balance -- report, of which there are three: periodic, cumulative and historical -- (see 'BalanceType' and "Hledger.Cli.Balance"). 
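--
-- A rough sketch of the shape, with hypothetical dates and amounts
-- (the Show instance below displays real reports via ppShow):
--
-- > MultiBalanceReport
-- >   ( [ jan..mar, apr..jun ]                                   -- the column date spans
-- >   , [ ("assets:cash", "cash", 2, [$-1, $-2], $-3, $-1.5) ]   -- one row per account
-- >   , ( [$-1, $-2], $-3, $-1.5 )                               -- column totals, grand total, average
-- >   )
--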
newtype MultiBalanceReport = MultiBalanceReport ([DateSpan] ,[MultiBalanceReportRow] ,MultiBalanceReportTotals ) type MultiBalanceReportRow = (AccountName, AccountName, Int, [MixedAmount], MixedAmount, MixedAmount) type MultiBalanceReportTotals = ([MixedAmount], MixedAmount, MixedAmount) instance Show MultiBalanceReport where -- use ppShow to break long lists onto multiple lines -- we add some bogus extra shows here to help ppShow parse the output -- and wrap tuples and lists properly show (MultiBalanceReport (spans, items, totals)) = "MultiBalanceReport (ignore extra quotes):\n" ++ ppShow (show spans, map show items, totals) -- type alias just to remind us which AccountNames might be depth-clipped, below. type ClippedAccountName = AccountName -- | Generates a single column BalanceReport like balanceReport, but uses -- multiBalanceReport, so supports --historical. -- TODO Does not support boring parent eliding or --flat yet. singleBalanceReport :: ReportOpts -> Query -> Journal -> BalanceReport singleBalanceReport opts q j = (rows', total) where MultiBalanceReport (_, rows, (totals, _, _)) = multiBalanceReport opts q j rows' = [(a ,if flat_ opts then a else a' -- BalanceReport expects full account name here with --flat ,if tree_ opts then d-1 else 0 -- BalanceReport uses 0-based account depths , headDef nullmixedamt amts -- 0 columns is illegal, should not happen, return zeroes if it does ) | (a,a',d, amts, _, _) <- rows] total = headDef nullmixedamt totals -- | Generate a multicolumn balance report for the matched accounts, -- showing the change of balance, accumulated balance, or historical balance -- in each of the specified periods. multiBalanceReport :: ReportOpts -> Query -> Journal -> MultiBalanceReport multiBalanceReport opts q j = MultiBalanceReport (displayspans, items, totalsrow) where symq = dbg1 "symq" $ filterQuery queryIsSym $ dbg1 "requested q" q depthq = dbg1 "depthq" $ filterQuery queryIsDepth q depth = queryDepth depthq depthless = dbg1 "depthless" . filterQuery (not . queryIsDepth) datelessq = dbg1 "datelessq" $ filterQuery (not . 
queryIsDateOrDate2) q dateqcons = if date2_ opts then Date2 else Date precedingq = dbg1 "precedingq" $ And [datelessq, dateqcons $ DateSpan Nothing (spanStart reportspan)] requestedspan = dbg1 "requestedspan" $ queryDateSpan (date2_ opts) q -- span specified by -b/-e/-p options and query args requestedspan' = dbg1 "requestedspan'" $ requestedspan `spanDefaultsFrom` journalDateSpan (date2_ opts) j -- if open-ended, close it using the journal's end dates intervalspans = dbg1 "intervalspans" $ splitSpan (interval_ opts) requestedspan' -- interval spans enclosing it reportspan = dbg1 "reportspan" $ DateSpan (maybe Nothing spanStart $ headMay intervalspans) -- the requested span enlarged to a whole number of intervals (maybe Nothing spanEnd $ lastMay intervalspans) newdatesq = dbg1 "newdateq" $ dateqcons reportspan reportq = dbg1 "reportq" $ depthless $ And [datelessq, newdatesq] -- user's query enlarged to whole intervals and with no depth limit ps :: [Posting] = dbg1 "ps" $ journalPostings $ filterJournalAmounts symq $ -- remove amount parts excluded by cur: filterJournalPostings reportq $ -- remove postings not matched by (adjusted) query journalSelectingAmountFromOpts opts j displayspans = dbg1 "displayspans" $ splitSpan (interval_ opts) displayspan where displayspan | empty_ opts = dbg1 "displayspan (-E)" reportspan -- all the requested intervals | otherwise = dbg1 "displayspan" $ requestedspan `spanIntersect` matchedspan -- exclude leading/trailing empty intervals matchedspan = dbg1 "matchedspan" $ postingsDateSpan' (whichDateFromOpts opts) ps psPerSpan :: [[Posting]] = dbg1 "psPerSpan" [filter (isPostingInDateSpan' (whichDateFromOpts opts) s) ps | s <- displayspans] postedAcctBalChangesPerSpan :: [[(ClippedAccountName, MixedAmount)]] = dbg1 "postedAcctBalChangesPerSpan" $ map postingAcctBals psPerSpan where postingAcctBals :: [Posting] -> [(ClippedAccountName, MixedAmount)] postingAcctBals ps = [(aname a, (if tree_ opts then aibalance else aebalance) a) | a <- as] where as = depthLimit $ (if tree_ opts then id else filter ((>0).anumpostings)) $ drop 1 $ accountsFromPostings ps depthLimit | tree_ opts = filter ((depthq `matchesAccount`).aname) -- exclude deeper balances | otherwise = clipAccountsAndAggregate depth -- aggregate deeper balances at the depth limit postedAccts :: [AccountName] = dbg1 "postedAccts" $ sort $ accountNamesFromPostings ps -- starting balances and accounts from transactions before the report start date startacctbals = dbg1 "startacctbals" $ map (\(a,_,_,b) -> (a,b)) startbalanceitems where (startbalanceitems,_) = dbg1 "starting balance report" $ balanceReport opts' precedingq j where opts' | tree_ opts = opts{no_elide_=True} | otherwise = opts{accountlistmode_=ALFlat} startingBalanceFor a = fromMaybe nullmixedamt $ lookup a startacctbals startAccts = dbg1 "startAccts" $ map fst startacctbals displayedAccts :: [ClippedAccountName] = dbg1 "displayedAccts" $ (if tree_ opts then expandAccountNames else id) $ nub $ map (clipOrEllipsifyAccountName depth) $ if empty_ opts then nub $ sort $ startAccts ++ postedAccts else postedAccts acctBalChangesPerSpan :: [[(ClippedAccountName, MixedAmount)]] = dbg1 "acctBalChangesPerSpan" [sortBy (comparing fst) $ unionBy (\(a,_) (a',_) -> a == a') postedacctbals zeroes | postedacctbals <- postedAcctBalChangesPerSpan] where zeroes = [(a, nullmixedamt) | a <- displayedAccts] acctBalChanges :: [(ClippedAccountName, [MixedAmount])] = dbg1 "acctBalChanges" [(a, map snd abs) | abs@((a,_):_) <- transpose acctBalChangesPerSpan] -- never null, 
or used when null... items :: [MultiBalanceReportRow] = dbg1 "items" [(a, accountLeafName a, accountNameLevel a, displayedBals, rowtot, rowavg) | (a,changes) <- acctBalChanges , let displayedBals = case balancetype_ opts of HistoricalBalance -> drop 1 $ scanl (+) (startingBalanceFor a) changes CumulativeChange -> drop 1 $ scanl (+) nullmixedamt changes _ -> changes , let rowtot = sum displayedBals , let rowavg = averageMixedAmounts displayedBals , empty_ opts || depth == 0 || any (not . isZeroMixedAmount) displayedBals ] totals :: [MixedAmount] = -- dbg1 "totals" $ map sum balsbycol where balsbycol = transpose [bs | (a,_,_,bs,_,_) <- items, not (tree_ opts) || a `elem` highestlevelaccts] highestlevelaccts = dbg1 "highestlevelaccts" [a | a <- displayedAccts, not $ any (`elem` displayedAccts) $ init $ expandAccountName a] totalsrow :: MultiBalanceReportTotals = dbg1 "totalsrow" (totals, sum totals, averageMixedAmounts totals) dbg1 s = let p = "multiBalanceReport" in Hledger.Utils.dbg1 (p++" "++s) -- add prefix in this function's debug output -- dbg1 = const id -- exclude this function from debug output -- | Convert all the amounts in a multi-column balance report to their -- value on the given date in their default valuation commodities -- (which are determined as of that date, not the report interval dates). multiBalanceReportValue :: Journal -> Day -> MultiBalanceReport -> MultiBalanceReport multiBalanceReportValue j d r = r' where MultiBalanceReport (spans, rows, (coltotals, rowtotaltotal, rowavgtotal)) = r r' = MultiBalanceReport (spans, [(acct, acct', depth, map convert rowamts, convert rowtotal, convert rowavg) | (acct, acct', depth, rowamts, rowtotal, rowavg) <- rows], (map convert coltotals, convert rowtotaltotal, convert rowavgtotal)) convert = mixedAmountValue j d hledger-lib-1.2/Hledger/Reports/PostingsReport.hs0000644000000000000000000005641613035210046020274 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances, TupleSections, OverloadedStrings #-} {-| Postings report, used by the register command. -} module Hledger.Reports.PostingsReport ( PostingsReport, PostingsReportItem, postingsReport, mkpostingsReportItem, -- * Tests tests_Hledger_Reports_PostingsReport ) where import Data.List import Data.Maybe import Data.Ord (comparing) -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe (headMay, lastMay) import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions -- | A postings report is a list of postings with a running total, a label -- for the total field, and a little extra transaction info to help with rendering. -- This is used eg for the register command. type PostingsReport = (String -- label for the running balance column XXX remove ,[PostingsReportItem] -- line items, one per posting ) type PostingsReportItem = (Maybe Day -- The posting date, if this is the first posting in a -- transaction or if it's different from the previous -- posting's date. Or if this a summary posting, the -- report interval's start date if this is the first -- summary posting in the interval. ,Maybe Day -- If this is a summary posting, the report interval's -- end date if this is the first summary posting in -- the interval. ,Maybe String -- The posting's transaction's description, if this is the first posting in the transaction. ,Posting -- The posting, possibly with the account name depth-clipped. 
,MixedAmount -- The running total after this posting, or with --average, -- the running average posting amount. With --historical, -- postings before the report start date are included in -- the running total/average. ) -- | Select postings from the journal and add running balance and other -- information to make a postings report. Used by eg hledger's register command. postingsReport :: ReportOpts -> Query -> Journal -> PostingsReport postingsReport opts q j = (totallabel, items) where reportspan = adjustReportDates opts q j whichdate = whichDateFromOpts opts depth = queryDepth q -- postings to be included in the report, and similarly-matched postings before the report start date (precedingps, reportps) = matchedPostingsBeforeAndDuring opts q j reportspan -- postings or pseudo postings to be displayed displayps | interval == NoInterval = map (,Nothing) reportps | otherwise = summarisePostingsByInterval interval whichdate depth showempty reportspan reportps where interval = interval_ opts -- XXX showempty = empty_ opts || average_ opts -- posting report items ready for display items = dbg1 "postingsReport items" $ postingsReportItems displayps (nullposting,Nothing) whichdate depth startbal runningcalc startnum where historical = balancetype_ opts == HistoricalBalance precedingsum = sumPostings precedingps precedingavg | null precedingps = 0 | otherwise = precedingsum `divideMixedAmount` (fromIntegral $ length precedingps) startbal | average_ opts = if historical then precedingavg else 0 | otherwise = if historical then precedingsum else 0 startnum = if historical then length precedingps + 1 else 1 runningcalc | average_ opts = \i avg amt -> avg + (amt - avg) `divideMixedAmount` (fromIntegral i) -- running average | otherwise = \_ bal amt -> bal + amt -- running total totallabel = "Total" -- | Adjust report start/end dates to more useful ones based on -- journal data and report intervals. Ie: -- 1. If the start date is unspecified, use the earliest date in the journal (if any) -- 2. If the end date is unspecified, use the latest date in the journal (if any) -- 3. If a report interval is specified, enlarge the dates to enclose whole intervals adjustReportDates :: ReportOpts -> Query -> Journal -> DateSpan adjustReportDates opts q j = reportspan where -- see also multiBalanceReport requestedspan = dbg1 "requestedspan" $ queryDateSpan' q -- span specified by -b/-e/-p options and query args journalspan = dbg1 "journalspan" $ dates `spanUnion` date2s -- earliest and latest dates (or date2s) in the journal where dates = journalDateSpan False j date2s = journalDateSpan True j requestedspanclosed = dbg1 "requestedspanclosed" $ requestedspan `spanDefaultsFrom` journalspan -- if open-ended, close it using the journal's dates (if any) intervalspans = dbg1 "intervalspans" $ splitSpan (interval_ opts) requestedspanclosed -- get the whole intervals enclosing that mreportstart = dbg1 "reportstart" $ maybe Nothing spanStart $ headMay intervalspans -- start of the first interval, or open ended mreportend = dbg1 "reportend" $ maybe Nothing spanEnd $ lastMay intervalspans -- end of the last interval, or open ended reportspan = dbg1 "reportspan" $ DateSpan mreportstart mreportend -- the requested span enlarged to whole intervals if possible -- | Find postings matching a given query, within a given date span, -- and also any similarly-matched postings before that date span. -- Date restrictions and depth restrictions in the query are ignored. -- A helper for the postings report. 
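-- For example (hypothetical dates): with a report span of
-- 2008/06/01-2009/01/01, the first list holds the query-matched postings
-- dated before 2008/06/01 (used above to seed the --historical starting
-- balance/average), and the second holds the postings inside the span,
-- which become the report lines:
--
-- > (precedingps, reportps) = matchedPostingsBeforeAndDuring opts q j reportspan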
matchedPostingsBeforeAndDuring :: ReportOpts -> Query -> Journal -> DateSpan -> ([Posting],[Posting]) matchedPostingsBeforeAndDuring opts q j (DateSpan mstart mend) = dbg1 "beforeps, duringps" $ span (beforestartq `matchesPosting`) beforeandduringps where beforestartq = dbg1 "beforestartq" $ dateqtype $ DateSpan Nothing mstart beforeandduringps = dbg1 "ps4" $ sortBy (comparing sortdate) $ -- sort postings by date or date2 dbg1 "ps3" $ map (filterPostingAmount symq) $ -- remove amount parts which the query's cur: terms would exclude dbg1 "ps2" $ (if related_ opts then concatMap relatedPostings else id) $ -- with -r, replace each with its sibling postings dbg1 "ps1" $ filter (beforeandduringq `matchesPosting`) $ -- filter postings by the query, with no start date or depth limit journalPostings $ journalSelectingAmountFromOpts opts j where beforeandduringq = dbg1 "beforeandduringq" $ And [depthless $ dateless q, beforeendq] where depthless = filterQuery (not . queryIsDepth) dateless = filterQuery (not . queryIsDateOrDate2) beforeendq = dateqtype $ DateSpan Nothing mend sortdate = if date2_ opts then postingDate2 else postingDate symq = dbg1 "symq" $ filterQuery queryIsSym q dateqtype | queryIsDate2 dateq || (queryIsDate dateq && date2_ opts) = Date2 | otherwise = Date where dateq = dbg1 "dateq" $ filterQuery queryIsDateOrDate2 $ dbg1 "q" q -- XXX confused by multiple date:/date2: ? -- | Generate postings report line items from a list of postings or (with -- non-Nothing dates attached) summary postings. postingsReportItems :: [(Posting,Maybe Day)] -> (Posting,Maybe Day) -> WhichDate -> Int -> MixedAmount -> (Int -> MixedAmount -> MixedAmount -> MixedAmount) -> Int -> [PostingsReportItem] postingsReportItems [] _ _ _ _ _ _ = [] postingsReportItems ((p,menddate):ps) (pprev,menddateprev) wd d b runningcalcfn itemnum = i:(postingsReportItems ps (p,menddate) wd d b' runningcalcfn (itemnum+1)) where i = mkpostingsReportItem showdate showdesc wd menddate p' b' (showdate, showdesc) | isJust menddate = (menddate /= menddateprev, False) | otherwise = (isfirstintxn || isdifferentdate, isfirstintxn) isfirstintxn = ptransaction p /= ptransaction pprev isdifferentdate = case wd of PrimaryDate -> postingDate p /= postingDate pprev SecondaryDate -> postingDate2 p /= postingDate2 pprev p' = p{paccount= clipOrEllipsifyAccountName d $ paccount p} b' = runningcalcfn itemnum b (pamount p) -- | Generate one postings report line item, containing the posting, -- the current running balance, and optionally the posting date and/or -- the transaction description. mkpostingsReportItem :: Bool -> Bool -> WhichDate -> Maybe Day -> Posting -> MixedAmount -> PostingsReportItem mkpostingsReportItem showdate showdesc wd menddate p b = (if showdate then Just date else Nothing ,menddate ,if showdesc then Just desc else Nothing ,p ,b ) where date = case wd of PrimaryDate -> postingDate p SecondaryDate -> postingDate2 p desc = T.unpack $ maybe "" tdescription $ ptransaction p -- | Convert a list of postings into summary postings, one per interval, -- aggregated to the specified depth if any. 
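-- For example (hypothetical data): with a monthly interval, postings to
-- expenses:food dated 2008/06/03 and 2008/06/25 would be aggregated into one
-- summary posting dated 2008/06/01, paired with the interval end 2008/07/01:
--
-- > summarisePostingsByInterval (Months 1) PrimaryDate 99999 False reportspan ps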
summarisePostingsByInterval :: Interval -> WhichDate -> Int -> Bool -> DateSpan -> [Posting] -> [SummaryPosting] summarisePostingsByInterval interval wd depth showempty reportspan ps = concatMap summarisespan $ splitSpan interval reportspan where summarisespan s = summarisePostingsInDateSpan s wd depth showempty (postingsinspan s) postingsinspan s = filter (isPostingInDateSpan' wd s) ps tests_summarisePostingsByInterval = [ "summarisePostingsByInterval" ~: do summarisePostingsByInterval (Quarters 1) PrimaryDate 99999 False (DateSpan Nothing Nothing) [] ~?= [] ] -- | A summary posting summarises the activity in one account within a report -- interval. It is currently kludgily represented by a regular Posting with no -- description, the interval's start date stored as the posting date, and the -- interval's end date attached with a tuple. type SummaryPosting = (Posting, Maybe Day) -- | Given a date span (representing a report interval) and a list of -- postings within it, aggregate the postings into one summary posting per -- account. -- -- When a depth argument is present, postings to accounts of greater -- depth are also aggregated where possible. If the depth is 0, all -- postings in the span are aggregated into a single posting with -- account name "...". -- -- The showempty flag includes spans with no postings and also postings -- with 0 amount. -- summarisePostingsInDateSpan :: DateSpan -> WhichDate -> Int -> Bool -> [Posting] -> [SummaryPosting] summarisePostingsInDateSpan (DateSpan b e) wd depth showempty ps | null ps && (isNothing b || isNothing e) = [] | null ps && showempty = [(summaryp, Just e')] | otherwise = summarypes where postingdate = if wd == PrimaryDate then postingDate else postingDate2 b' = fromMaybe (maybe nulldate postingdate $ headMay ps) b e' = fromMaybe (maybe (addDays 1 nulldate) postingdate $ lastMay ps) e summaryp = nullposting{pdate=Just b'} clippedanames | depth > 0 = nub $ map (clipAccountName depth) anames | otherwise = ["..."] summaryps | depth > 0 = [summaryp{paccount=a,pamount=balance a} | a <- clippedanames] | otherwise = [summaryp{paccount="...",pamount=sum $ map pamount ps}] summarypes = map (, Just e') $ (if showempty then id else filter (not . isZeroMixedAmount . 
pamount)) summaryps anames = sort $ nub $ map paccount ps -- aggregate balances by account, like ledgerFromJournal, then do depth-clipping accts = accountsFromPostings ps balance a = maybe nullmixedamt bal $ lookupAccount a accts where bal = if isclipped a then aibalance else aebalance isclipped a = accountNameLevel a >= depth -- tests_summarisePostingsInDateSpan = [ -- "summarisePostingsInDateSpan" ~: do -- let gives (b,e,depth,showempty,ps) = -- (summarisePostingsInDateSpan (mkdatespan b e) depth showempty ps `is`) -- let ps = -- [ -- nullposting{lpdescription="desc",lpaccount="expenses:food:groceries",lpamount=Mixed [usd 1]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=Mixed [usd 2]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food", lpamount=Mixed [usd 4]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=Mixed [usd 8]} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,[]) `gives` -- [] -- ("2008/01/01","2009/01/01",0,9999,True,[]) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31"} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food", lpamount=Mixed [usd 4]} -- ,nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food:dining", lpamount=Mixed [usd 10]} -- ,nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food:groceries",lpamount=Mixed [usd 1]} -- ] -- ("2008/01/01","2009/01/01",0,2,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food",lpamount=Mixed [usd 15]} -- ] -- ("2008/01/01","2009/01/01",0,1,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses",lpamount=Mixed [usd 15]} -- ] -- ("2008/01/01","2009/01/01",0,0,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="",lpamount=Mixed [usd 15]} -- ] tests_postingsReport = [ "postingsReport" ~: do -- with the query specified explicitly let (query, journal) `gives` n = (length $ snd $ postingsReport defreportopts query journal) `is` n (Any, nulljournal) `gives` 0 (Any, samplejournal) `gives` 11 -- register --depth just clips account names (Depth 2, samplejournal) `gives` 11 (And [Depth 1, Status Cleared, Acct "expenses"], samplejournal) `gives` 2 (And [And [Depth 1, Status Cleared], Acct "expenses"], samplejournal) `gives` 2 -- with query and/or command-line options assertEqual "" 11 (length $ snd $ postingsReport defreportopts Any samplejournal) assertEqual "" 9 (length $ snd $ postingsReport defreportopts{interval_=Months 1} Any samplejournal) assertEqual "" 19 (length $ snd $ postingsReport defreportopts{interval_=Months 1, empty_=True} Any samplejournal) assertEqual "" 4 (length $ snd $ postingsReport defreportopts (Acct "assets:bank:checking") samplejournal) -- (defreportopts, And [Acct "a a", Acct "'b"], samplejournal2) `gives` 0 -- [(Just (parsedate "2008-01-01","income"),assets:bank:checking $1,$1) -- ,(Nothing,income:salary $-1,0) -- ,(Just (2008-06-01,"gift"),assets:bank:checking $1,$1) -- ,(Nothing,income:gifts $-1,0) -- ,(Just (2008-06-02,"save"),assets:bank:saving $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) -- ,(Just (2008-06-03,"eat & shop"),expenses:food $1,$1) -- ,(Nothing,expenses:supplies $1,$2) -- ,(Nothing,assets:cash $-2,0) -- 
,(Just (2008-12-31,"pay off"),liabilities:debts $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) -- ] {- let opts = defreportopts (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with cleared option" ~: do let opts = defreportopts{cleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with uncleared option" ~: do let opts = defreportopts{uncleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report sorts by date" ~: do j <- readJournal' $ unlines ["2008/02/02 a" ," b 1" ," c" ,"" ,"2008/01/01 d" ," e 1" ," f" ] let opts = defreportopts registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/02/02"] ,"postings report with account pattern" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cash"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with account pattern, case insensitive" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cAsH"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with display expression" ~: do j <- samplejournal let gives displayexpr = (registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is`) where opts = defreportopts{display_=Just displayexpr} "d<[2008/6/2]" `gives` ["2008/01/01","2008/06/01"] "d<=[2008/6/2]" `gives` ["2008/01/01","2008/06/01","2008/06/02"] "d=[2008/6/2]" `gives` ["2008/06/02"] "d>=[2008/6/2]" `gives` ["2008/06/02","2008/06/03","2008/12/31"] "d>[2008/6/2]" `gives` ["2008/06/03","2008/12/31"] ,"postings report with period expression" ~: do j <- samplejournal let periodexpr `gives` dates = do j' <- samplejournal registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j') `is` dates where opts = defreportopts{period_=maybePeriod date1 periodexpr} "" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2008" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2007" `gives` [] "june" `gives` ["2008/06/01","2008/06/02","2008/06/03"] "monthly" `gives` ["2008/01/01","2008/06/01","2008/12/01"] "quarterly" `gives` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=maybePeriod date1 "yearly"} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines 
["2008/01/01 - 2008/12/31 assets:bank:saving $1 $1" ," assets:cash $-2 $-1" ," expenses:food $1 0" ," expenses:supplies $1 $1" ," income:gifts $-1 0" ," income:salary $-1 $-1" ," liabilities:debts $1 0" ] let opts = defreportopts{period_=maybePeriod date1 "quarterly"} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=maybePeriod date1 "quarterly",empty_=True} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/07/01","2008/10/01"] ] , "postings report with depth arg" ~: do j <- samplejournal let opts = defreportopts{depth_=Just 2} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank $1 $1" ," assets:bank $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank $-1 0" ] -} ] tests_Hledger_Reports_PostingsReport :: Test tests_Hledger_Reports_PostingsReport = TestList $ tests_summarisePostingsByInterval ++ tests_postingsReport hledger-lib-1.2/Hledger/Reports/TransactionsReports.hs0000644000000000000000000003305413035210046021312 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Here are several variants of a transactions report. Transactions reports are like a postings report, but more transaction-oriented, and (in the account-centric variant) relative to a some base account. They are used by hledger-web. -} module Hledger.Reports.TransactionsReports ( TransactionsReport, TransactionsReportItem, AccountTransactionsReport, AccountTransactionsReportItem, triOrigTransaction, triDate, triAmount, triBalance, triCommodityAmount, triCommodityBalance, journalTransactionsReport, accountTransactionsReport, transactionsReportByCommodity, transactionRegisterDate -- -- * Tests -- tests_Hledger_Reports_TransactionsReports ) where import Data.List import Data.Ord -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions import Hledger.Utils.Debug -- | A transactions report includes a list of transactions -- (posting-filtered and unfiltered variants), a running balance, and some -- other information helpful for rendering a register view (a flag -- indicating multiple other accounts and a display string describing -- them) with or without a notion of current account(s). -- Two kinds of report use this data structure, see journalTransactionsReport -- and accountTransactionsReport below for detais. 
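--
-- A minimal usage sketch (defreportopts, samplejournal and the query are
-- assumptions for illustration, not a prescribed API):
--
-- > let (label, items) = journalTransactionsReport defreportopts samplejournal (Acct "assets")
-- > [ (tdescription (triOrigTransaction i), triBalance i) | i <- items ]
--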
type TransactionsReport = (String -- label for the balance column, eg "balance" or "total" ,[TransactionsReportItem] -- line items, one per transaction ) type TransactionsReportItem = (Transaction -- the original journal transaction, unmodified ,Transaction -- the transaction as seen from a particular account, with postings maybe filtered ,Bool -- is this a split, ie more than one other account posting ,String -- a display string describing the other account(s), if any ,MixedAmount -- the amount posted to the current account(s) by the filtered postings (or total amount posted) ,MixedAmount -- the running total of item amounts, starting from zero; -- or with --historical, the running total including items -- (matched by the report query) preceding the report period ) triOrigTransaction (torig,_,_,_,_,_) = torig triDate (_,tacct,_,_,_,_) = tdate tacct triAmount (_,_,_,_,a,_) = a triBalance (_,_,_,_,_,a) = a triCommodityAmount c = filterMixedAmountByCommodity c . triAmount triCommodityBalance c = filterMixedAmountByCommodity c . triBalance ------------------------------------------------------------------------------- -- | Select transactions from the whole journal. This is similar to a -- "postingsReport" except with transaction-based report items which -- are ordered most recent first. XXX Or an EntriesReport - use that instead ? -- This is used by hledger-web's journal view. journalTransactionsReport :: ReportOpts -> Journal -> Query -> TransactionsReport journalTransactionsReport opts j q = (totallabel, items) where -- XXX items' first element should be the full transaction with all postings items = reverse $ accountTransactionsReportItems q None nullmixedamt id ts ts = sortBy (comparing date) $ filter (q `matchesTransaction`) $ jtxns $ journalSelectingAmountFromOpts opts j date = transactionDateFn opts ------------------------------------------------------------------------------- -- | An account transactions report represents transactions affecting -- a particular account (or possibly several accounts, but we don't -- use that). It is used eg by hledger-ui's and hledger-web's account -- register view, where we want to show one row per transaction, in -- the context of the current account. Report items consist of: -- -- - the transaction, unmodified -- -- - the transaction as seen in the context of the current account and query, -- which means: -- -- - the transaction date is set to the "transaction context date", -- which can be different from the transaction's general date: -- if postings to the current account (and matched by the report query) -- have their own dates, it's the earliest of these dates. -- -- - the transaction's postings are filtered, excluding any which are not -- matched by the report query -- -- - a text description of the other account(s) posted to/from -- -- - a flag indicating whether there's more than one other account involved -- -- - the total increase/decrease to the current account -- -- - the report transactions' running total after this transaction; -- or if historical balance is requested (-H), the historical running total. -- The historical running total includes transactions from before the -- report start date if one is specified, filtered by the report query. -- The historical running total may or may not be the account's historical -- running balance, depending on the report query. -- -- Items are sorted by transaction register date (the earliest date the transaction -- posts to the current account), most recent first. 
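--
-- For example (using the sample journal from this package's tests): in the
-- register of assets:bank:checking, the "pay off" transaction, which posts
-- $1 to liabilities:debts and $-1 to assets:bank:checking, would yield an
-- item roughly like (t, t', False, "liabilities:debts", $-1, <running total>).
--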
-- Reporting intervals are currently ignored. -- type AccountTransactionsReport = (String -- label for the balance column, eg "balance" or "total" ,[AccountTransactionsReportItem] -- line items, one per transaction ) type AccountTransactionsReportItem = ( Transaction -- the transaction, unmodified ,Transaction -- the transaction, as seen from the current account ,Bool -- is this a split (more than one posting to other accounts) ? ,String -- a display string describing the other account(s), if any ,MixedAmount -- the amount posted to the current account(s) (or total amount posted) ,MixedAmount -- the register's running total or the current account(s)'s historical balance, after this transaction ) accountTransactionsReport :: ReportOpts -> Journal -> Query -> Query -> AccountTransactionsReport accountTransactionsReport opts j reportq thisacctq = (label, items) where -- a depth limit does not affect the account transactions report -- seems unnecessary for some reason XXX reportq' = -- filterQuery (not . queryIsDepth) reportq -- get all transactions, with amounts converted to cost basis if -B ts1 = jtxns $ journalSelectingAmountFromOpts opts j -- apply any cur:SYM filters in reportq' symq = filterQuery queryIsSym reportq' ts2 = (if queryIsNull symq then id else map (filterTransactionAmounts symq)) ts1 -- keep just the transactions affecting this account (via possibly realness or status-filtered postings) realq = filterQuery queryIsReal reportq' statusq = filterQuery queryIsStatus reportq' ts3 = filter (matchesTransaction thisacctq . filterTransactionPostings (And [realq, statusq])) ts2 -- sort by the transaction's register date, for accurate starting balance ts = sortBy (comparing (transactionRegisterDate reportq' thisacctq)) ts3 (startbal,label) | balancetype_ opts == HistoricalBalance = (sumPostings priorps, balancelabel) | otherwise = (nullmixedamt, totallabel) where priorps = dbg1 "priorps" $ filter (matchesPosting (dbg1 "priorq" $ And [thisacctq, tostartdateq, datelessreportq])) $ transactionsPostings ts tostartdateq = case mstartdate of Just _ -> Date (DateSpan Nothing mstartdate) Nothing -> None -- no start date specified, there are no prior postings mstartdate = queryStartDate (date2_ opts) reportq' datelessreportq = filterQuery (not . queryIsDateOrDate2) reportq' items = reverse $ -- see also registerChartHtml accountTransactionsReportItems reportq' thisacctq startbal negate ts totallabel = "Period Total" balancelabel = "Historical Total" -- | Generate transactions report items from a list of transactions, -- using the provided user-specified report query, a query specifying -- which account to use as the focus, a starting balance, a sign-setting -- function and a balance-summing function. Or with a None current account -- query, this can also be used for the journalTransactionsReport. 
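-- The two call patterns used in this module are, roughly:
--
-- > accountTransactionsReportItems q None      nullmixedamt id     ts   -- journalTransactionsReport
-- > accountTransactionsReportItems q thisacctq startbal     negate ts   -- accountTransactionsReport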
accountTransactionsReportItems :: Query -> Query -> MixedAmount -> (MixedAmount -> MixedAmount) -> [Transaction] -> [TransactionsReportItem] accountTransactionsReportItems _ _ _ _ [] = [] accountTransactionsReportItems reportq thisacctq bal signfn (torig:ts) = case i of Just i' -> i':is Nothing -> is -- 201403: This is used for both accountTransactionsReport and journalTransactionsReport, which makes it a bit overcomplicated -- 201407: I've lost my grip on this, let's just hope for the best -- 201606: we now calculate change and balance from filtered postings, check this still works well for all callers XXX where tfiltered@Transaction{tpostings=reportps} = filterTransactionPostings reportq torig tacct = tfiltered{tdate=transactionRegisterDate reportq thisacctq tfiltered} (i,bal') = case reportps of [] -> (Nothing,bal) -- no matched postings in this transaction, skip it _ -> (Just (torig, tacct, numotheraccts > 1, otheracctstr, a, b), b) where (thisacctps, otheracctps) = partition (matchesPosting thisacctq) reportps numotheraccts = length $ nub $ map paccount otheracctps otheracctstr | thisacctq == None = summarisePostingAccounts reportps -- no current account ? summarise all matched postings | numotheraccts == 0 = summarisePostingAccounts thisacctps -- only postings to current account ? summarise those | otherwise = summarisePostingAccounts otheracctps -- summarise matched postings to other account(s) a = signfn $ negate $ sum $ map pamount thisacctps b = bal + a is = accountTransactionsReportItems reportq thisacctq bal' signfn ts -- | What is the transaction's date in the context of a particular account -- (specified with a query) and report query, as in an account register ? -- It's normally the transaction's general date, but if any posting(s) -- matched by the report query and affecting the matched account(s) have -- their own earlier dates, it's the earliest of these dates. -- Secondary transaction/posting dates are ignored. transactionRegisterDate :: Query -> Query -> Transaction -> Day transactionRegisterDate reportq thisacctq t | null thisacctps = tdate t | otherwise = minimum $ map postingDate thisacctps where reportps = tpostings $ filterTransactionPostings reportq t thisacctps = filter (matchesPosting thisacctq) reportps -- -- | Generate a short readable summary of some postings, like -- -- "from (negatives) to (positives)". -- summarisePostings :: [Posting] -> String -- summarisePostings ps = -- case (summarisePostingAccounts froms, summarisePostingAccounts tos) of -- ("",t) -> "to "++t -- (f,"") -> "from "++f -- (f,t) -> "from "++f++" to "++t -- where -- (froms,tos) = partition (fromMaybe False . isNegativeMixedAmount . pamount) ps -- | Generate a simplified summary of some postings' accounts. -- To reduce noise, if there are both real and virtual postings, show only the real ones. summarisePostingAccounts :: [Posting] -> String summarisePostingAccounts ps = (intercalate ", " . map (T.unpack . accountSummarisedName) . nub . map paccount) displayps -- XXX pack where realps = filter isReal ps displayps | null realps = ps | otherwise = realps ------------------------------------------------------------------------------- -- | Split a transactions report whose items may involve several commodities, -- into one or more single-commodity transactions reports. 
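-- For example (hypothetical): a register whose item amounts involve both
-- "$" and "€" splits into two single-commodity reports, keyed by commodity
-- symbol, each with running balances recomputed for that commodity alone:
--
-- > [ (c, length (snd r)) | (c, r) <- transactionsReportByCommodity tr ]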
transactionsReportByCommodity :: TransactionsReport -> [(CommoditySymbol, TransactionsReport)] transactionsReportByCommodity tr = [(c, filterTransactionsReportByCommodity c tr) | c <- transactionsReportCommodities tr] where transactionsReportCommodities (_,items) = nub $ sort $ map acommodity $ concatMap (amounts . triAmount) items -- Remove transaction report items and item amount (and running -- balance amount) components that don't involve the specified -- commodity. Other item fields such as the transaction are left unchanged. filterTransactionsReportByCommodity :: CommoditySymbol -> TransactionsReport -> TransactionsReport filterTransactionsReportByCommodity c (label,items) = (label, fixTransactionsReportItemBalances $ concat [filterTransactionsReportItemByCommodity c i | i <- items]) where filterTransactionsReportItemByCommodity c (t,t2,s,o,a,bal) | c `elem` cs = [item'] | otherwise = [] where cs = map acommodity $ amounts a item' = (t,t2,s,o,a',bal) a' = filterMixedAmountByCommodity c a fixTransactionsReportItemBalances [] = [] fixTransactionsReportItemBalances [i] = [i] fixTransactionsReportItemBalances items = reverse $ i:(go startbal is) where i:is = reverse items startbal = filterMixedAmountByCommodity c $ triBalance i go _ [] = [] go bal ((t,t2,s,o,amt,_):is) = (t,t2,s,o,amt,bal'):go bal' is where bal' = bal + amt ------------------------------------------------------------------------------- hledger-lib-1.2/Hledger/Utils.hs0000644000000000000000000001515213066746043014743 0ustar0000000000000000{-| Standard imports and utilities which are useful everywhere, or needed low in the module hierarchy. This is the bottom of hledger's module graph. -} module Hledger.Utils (---- provide these frequently used modules - or not, for clearer api: -- module Control.Monad, -- module Data.List, -- module Data.Maybe, -- module Data.Time.Calendar, -- module Data.Time.Clock, -- module Data.Time.LocalTime, -- module Data.Tree, -- module Text.RegexPR, -- module Test.HUnit, -- module Text.Printf, ---- all of this one: module Hledger.Utils, module Hledger.Utils.Debug, module Hledger.Utils.Parse, module Hledger.Utils.Regex, module Hledger.Utils.String, module Hledger.Utils.Text, module Hledger.Utils.Test, module Hledger.Utils.Tree, -- Debug.Trace.trace, -- module Data.PPrint, -- module Hledger.Utils.UTF8IOCompat SystemString,fromSystemString,toSystemString,error',userError',usageError, -- the rest need to be done in each module I think ) where import Control.Monad (liftM) -- import Data.Char import Data.List -- import Data.Maybe -- import Data.PPrint import Data.Text (Text) import qualified Data.Text.IO as T import Data.Time.Clock import Data.Time.LocalTime -- import Data.Text (Text) -- import qualified Data.Text as T import System.Directory (getHomeDirectory) import System.FilePath((), isRelative) import System.IO -- import Text.Printf -- import qualified Data.Map as Map import Hledger.Utils.Debug import Hledger.Utils.Parse import Hledger.Utils.Regex import Hledger.Utils.String import Hledger.Utils.Text import Hledger.Utils.Test import Hledger.Utils.Tree -- import Prelude hiding (readFile,writeFile,appendFile,getContents,putStr,putStrLn) -- import Hledger.Utils.UTF8IOCompat (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import Hledger.Utils.UTF8IOCompat (SystemString,fromSystemString,toSystemString,error',userError',usageError) -- tuples first3 (x,_,_) = x second3 (_,x,_) = x third3 (_,_,x) = x first4 (x,_,_,_) = x second4 (_,x,_,_) = x third4 (_,_,x,_) = x fourth4 (_,_,_,x) = x first5 
(x,_,_,_,_) = x second5 (_,x,_,_,_) = x third5 (_,_,x,_,_) = x fourth5 (_,_,_,x,_) = x fifth5 (_,_,_,_,x) = x first6 (x,_,_,_,_,_) = x second6 (_,x,_,_,_,_) = x third6 (_,_,x,_,_,_) = x fourth6 (_,_,_,x,_,_) = x fifth6 (_,_,_,_,x,_) = x sixth6 (_,_,_,_,_,x) = x -- lists splitAtElement :: Eq a => a -> [a] -> [[a]] splitAtElement x l = case l of [] -> [] e:es | e==x -> split es es -> split es where split es = let (first,rest) = break (x==) es in first : splitAtElement x rest -- text -- time getCurrentLocalTime :: IO LocalTime getCurrentLocalTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToLocalTime tz t getCurrentZonedTime :: IO ZonedTime getCurrentZonedTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToZonedTime tz t -- misc isLeft :: Either a b -> Bool isLeft (Left _) = True isLeft _ = False isRight :: Either a b -> Bool isRight = not . isLeft -- | Apply a function the specified number of times. Possibly uses O(n) stack ? applyN :: Int -> (a -> a) -> a -> a applyN n f = (!! n) . iterate f -- from protolude, compare -- applyN :: Int -> (a -> a) -> a -> a -- applyN n f = X.foldr (.) identity (X.replicate n f) -- | Convert a possibly relative, possibly tilde-containing file path to an absolute one, -- given the current directory. ~username is not supported. Leave "-" unchanged. -- Can raise an error. expandPath :: FilePath -> FilePath -> IO FilePath -- general type sig for use in reader parsers expandPath _ "-" = return "-" expandPath curdir p = (if isRelative p then (curdir ) else id) `liftM` expandPath' p where expandPath' ('~':'/':p) = ( p) <$> getHomeDirectory expandPath' ('~':'\\':p) = ( p) <$> getHomeDirectory expandPath' ('~':_) = ioError $ userError "~USERNAME in paths is not supported" expandPath' p = return p firstJust ms = case dropWhile (==Nothing) ms of [] -> Nothing (md:_) -> md -- | Read a file in universal newline mode, handling any of the usual line ending conventions. readFile' :: FilePath -> IO Text readFile' name = do h <- openFile name ReadMode hSetNewlineMode h universalNewlineMode T.hGetContents h -- | Read a file in universal newline mode, handling any of the usual line ending conventions. readFileAnyLineEnding :: FilePath -> IO Text readFileAnyLineEnding path = do h <- openFile path ReadMode hSetNewlineMode h universalNewlineMode T.hGetContents h -- | Read the given file, or standard input if the path is "-", using -- universal newline mode. readFileOrStdinAnyLineEnding :: String -> IO Text readFileOrStdinAnyLineEnding f = do h <- fileHandle f hSetNewlineMode h universalNewlineMode T.hGetContents h where fileHandle "-" = return stdin fileHandle f = openFile f ReadMode -- | Total version of maximum, for integral types, giving 0 for an empty list. maximum' :: Integral a => [a] -> a maximum' [] = 0 maximum' xs = maximumStrict xs -- | Strict version of sum that doesn’t leak space {-# INLINABLE sumStrict #-} sumStrict :: Num a => [a] -> a sumStrict = foldl' (+) 0 -- | Strict version of maximum that doesn’t leak space {-# INLINABLE maximumStrict #-} maximumStrict :: Ord a => [a] -> a maximumStrict = foldl1' max -- | Strict version of minimum that doesn’t leak space {-# INLINABLE minimumStrict #-} minimumStrict :: Ord a => [a] -> a minimumStrict = foldl1' min -- | This is a version of sequence based on difference lists. It is -- slightly faster but we mostly use it because it uses the heap -- instead of the stack. 
This has the advantage that Neil Mitchell’s -- trick of limiting the stack size to discover space leaks doesn’t -- show this as a false positive. {-# INLINABLE sequence' #-} sequence' :: Monad f => [f a] -> f [a] sequence' ms = do h <- go id ms return (h []) where go h [] = return h go h (m:ms) = do x <- m go (h . (x :)) ms {-# INLINABLE mapM' #-} mapM' :: Monad f => (a -> f b) -> [a] -> f [b] mapM' f = sequence' . map f hledger-lib-1.2/Hledger/Utils/Debug.hs0000644000000000000000000002134613035510426015761 0ustar0000000000000000{-# LANGUAGE CPP, FlexibleContexts, TypeFamilies #-} -- | Debugging helpers -- more: -- http://hackage.haskell.org/packages/archive/TraceUtils/0.1.0.2/doc/html/Debug-TraceUtils.html -- http://hackage.haskell.org/packages/archive/trace-call/0.1/doc/html/Debug-TraceCall.html -- http://hackage.haskell.org/packages/archive/htrace/0.1/doc/html/Debug-HTrace.html -- http://hackage.haskell.org/packages/archive/traced/2009.7.20/doc/html/Debug-Traced.html module Hledger.Utils.Debug ( module Hledger.Utils.Debug ,module Debug.Trace #if __GLASGOW_HASKELL__ >= 704 ,ppShow #endif ) where import Control.Monad (when) import Control.Monad.IO.Class import Data.List hiding (uncons) import qualified Data.Text as T import Debug.Trace import Hledger.Utils.Parse import Safe (readDef) import System.Environment (getArgs) import System.Exit import System.IO.Unsafe (unsafePerformIO) import Text.Megaparsec import Text.Printf #if __GLASGOW_HASKELL__ >= 704 import Text.Show.Pretty (ppShow) #else -- the required pretty-show version requires GHC >= 7.4 ppShow :: Show a => a -> String ppShow = show #endif pprint :: Show a => a -> IO () pprint = putStrLn . ppShow -- | Trace (print to stderr) a showable value using a custom show function. traceWith :: (a -> String) -> a -> a traceWith f a = trace (f a) a -- | Parsec trace - show the current parsec position and next input, -- and the provided label if it's non-null. ptrace :: String -> TextParser m () ptrace msg = do pos <- getPosition next <- (T.take peeklength) `fmap` getInput let (l,c) = (sourceLine pos, sourceColumn pos) s = printf "at line %2d col %2d: %s" (unPos l) (unPos c) (show next) :: String s' = printf ("%-"++show (peeklength+30)++"s") s ++ " " ++ msg trace s' $ return () where peeklength = 30 -- | Global debug level, which controls the verbosity of debug output -- on the console. The default is 0 meaning no debug output. The -- @--debug@ command line flag sets it to 1, or @--debug=N@ sets it to -- a higher value (note: not @--debug N@ for some reason). This uses -- unsafePerformIO and can be accessed from anywhere and before normal -- command-line processing. When running with :main in GHCI, you must -- touch and reload this module to see the effect of a new --debug option. -- After command-line processing, it is also available as the @debug_@ -- field of 'Hledger.Cli.CliOptions.CliOpts'. -- {-# OPTIONS_GHC -fno-cse #-} -- {-# NOINLINE debugLevel #-} debugLevel :: Int debugLevel = case snd $ break (=="--debug") args of "--debug":[] -> 1 "--debug":n:_ -> readDef 1 n _ -> case take 1 $ filter ("--debug" `isPrefixOf`) args of ['-':'-':'d':'e':'b':'u':'g':'=':v] -> readDef 1 v _ -> 0 where args = unsafePerformIO getArgs -- | Convenience aliases for tracePrettyAt. -- Always pretty-print a message and the showable value to the console, then return it. -- ("dbg" without the 0 clashes with megaparsec 5.1). 
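--
-- Typical usage, as seen in this library's report code (eg
-- Hledger.Reports.BalanceReport): label an intermediate value, which is
-- pretty-printed to stderr when the --debug level is high enough and
-- returned unchanged either way:
--
-- > items = dbg1 "items" $ map (balanceReportItem opts q) accts'
--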
dbg0 :: Show a => String -> a -> a dbg0 = tracePrettyAt 0 -- | Pretty-print a message and the showable value to the console when the debug level is >= 1, then return it. Uses unsafePerformIO. dbg1 :: Show a => String -> a -> a dbg1 = tracePrettyAt 1 dbg2 :: Show a => String -> a -> a dbg2 = tracePrettyAt 2 dbg3 :: Show a => String -> a -> a dbg3 = tracePrettyAt 3 dbg4 :: Show a => String -> a -> a dbg4 = tracePrettyAt 4 dbg5 :: Show a => String -> a -> a dbg5 = tracePrettyAt 5 dbg6 :: Show a => String -> a -> a dbg6 = tracePrettyAt 6 dbg7 :: Show a => String -> a -> a dbg7 = tracePrettyAt 7 dbg8 :: Show a => String -> a -> a dbg8 = tracePrettyAt 8 dbg9 :: Show a => String -> a -> a dbg9 = tracePrettyAt 9 -- | Convenience aliases for tracePrettyAtIO. -- Like dbg, but convenient to insert in an IO monad. -- XXX These have a bug; they should use traceIO, not trace, -- otherwise GHC can occasionally over-optimise -- (cf lpaste a few days ago where it killed/blocked a child thread). dbg0IO :: (MonadIO m, Show a) => String -> a -> m () dbg0IO = tracePrettyAtIO 0 dbg1IO :: (MonadIO m, Show a) => String -> a -> m () dbg1IO = tracePrettyAtIO 1 dbg2IO :: (MonadIO m, Show a) => String -> a -> m () dbg2IO = tracePrettyAtIO 2 dbg3IO :: (MonadIO m, Show a) => String -> a -> m () dbg3IO = tracePrettyAtIO 3 dbg4IO :: (MonadIO m, Show a) => String -> a -> m () dbg4IO = tracePrettyAtIO 4 dbg5IO :: (MonadIO m, Show a) => String -> a -> m () dbg5IO = tracePrettyAtIO 5 dbg6IO :: (MonadIO m, Show a) => String -> a -> m () dbg6IO = tracePrettyAtIO 6 dbg7IO :: (MonadIO m, Show a) => String -> a -> m () dbg7IO = tracePrettyAtIO 7 dbg8IO :: (MonadIO m, Show a) => String -> a -> m () dbg8IO = tracePrettyAtIO 8 dbg9IO :: (MonadIO m, Show a) => String -> a -> m () dbg9IO = tracePrettyAtIO 9 -- | Pretty-print a message and a showable value to the console if the debug level is at or above the specified level. -- dbtAt 0 always prints. Otherwise, uses unsafePerformIO. tracePrettyAt :: Show a => Int -> String -> a -> a tracePrettyAt lvl = dbgppshow lvl -- tracePrettyAtM :: (Monad m, Show a) => Int -> String -> a -> m a -- tracePrettyAtM lvl lbl x = tracePrettyAt lvl lbl x `seq` return x -- XXX Could not deduce (a ~ ()) -- from the context (Show a) -- bound by the type signature for -- dbgM :: Show a => String -> a -> IO () -- at hledger/Hledger/Cli/Main.hs:200:13-42 -- ‘a’ is a rigid type variable bound by -- the type signature for dbgM :: Show a => String -> a -> IO () -- at hledger/Hledger/Cli/Main.hs:200:13 -- Expected type: String -> a -> IO () -- Actual type: String -> a -> IO a tracePrettyAtIO :: (MonadIO m, Show a) => Int -> String -> a -> m () tracePrettyAtIO lvl lbl x = liftIO $ tracePrettyAt lvl lbl x `seq` return () -- | print this string to the console before evaluating the expression, -- if the global debug level is at or above the specified level. Uses unsafePerformIO. -- dbgtrace :: Int -> String -> a -> a -- dbgtrace level -- | debugLevel >= level = trace -- | otherwise = flip const -- | Print a showable value to the console, with a message, if the -- debug level is at or above the specified level (uses -- unsafePerformIO). -- Values are displayed with show, all on one line, which is hard to read. -- dbgshow :: Show a => Int -> String -> a -> a -- dbgshow level -- | debugLevel >= level = ltrace -- | otherwise = flip const -- | Print a showable value to the console, with a message, if the -- debug level is at or above the specified level (uses -- unsafePerformIO). 
-- Values are displayed with ppShow, each field/constructor on its own line. dbgppshow :: Show a => Int -> String -> a -> a dbgppshow level | level > 0 && debugLevel < level = flip const | otherwise = \s a -> let p = ppShow a ls = lines p nlorspace | length ls > 1 = "\n" | otherwise = " " ++ take (10 - length s) (repeat ' ') ls' | length ls > 1 = map (" "++) ls | otherwise = ls in trace (s++":"++nlorspace++intercalate "\n" ls') a -- -- | Print a showable value to the console, with a message, if the -- -- debug level is at or above the specified level (uses -- -- unsafePerformIO). -- -- Values are displayed with pprint. Field names are not shown, but the -- -- output is compact with smart line wrapping, long data elided, -- -- and slow calculations timed out. -- dbgpprint :: Data a => Int -> String -> a -> a -- dbgpprint level msg a -- | debugLevel >= level = unsafePerformIO $ do -- pprint a >>= putStrLn . ((msg++": \n") ++) . show -- return a -- | otherwise = a -- | Like dbg, then exit the program. Uses unsafePerformIO. dbgExit :: Show a => String -> a -> a dbgExit msg = const (unsafePerformIO exitFailure) . dbg0 msg -- | Print a message and parsec debug info (parse position and next -- input) to the console when the debug level is at or above -- this level. Uses unsafePerformIO. -- pdbgAt :: GenParser m => Float -> String -> m () pdbg :: Int -> String -> TextParser m () pdbg level msg = when (level <= debugLevel) $ ptrace msg -- | Like dbg, but writes the output to "debug.log" in the current directory. -- Uses unsafePerformIO. Can fail due to log file contention if called too quickly -- ("*** Exception: debug.log: openFile: resource busy (file is locked)"). dbglog :: Show a => String -> a -> a dbglog label a = (unsafePerformIO $ appendFile "debug.log" $ label ++ ": " ++ ppShow a ++ "\n") `seq` a hledger-lib-1.2/Hledger/Utils/Parse.hs0000644000000000000000000000511713035510426016003 0ustar0000000000000000{-# LANGUAGE FlexibleContexts, TypeFamilies #-} module Hledger.Utils.Parse where import Control.Monad.Except import Data.Char import Data.List import Data.Text (Text) import Text.Megaparsec hiding (State) import Data.Functor.Identity (Identity(..)) import Text.Printf import Control.Monad.State.Strict (StateT, evalStateT) import Hledger.Data.Types import Hledger.Utils.UTF8IOCompat (error') -- | A parser of strict text with generic user state, monad and return type. type TextParser m a = ParsecT Dec Text m a type JournalStateParser m a = StateT Journal (ParsecT Dec Text m) a type JournalParser a = StateT Journal (ParsecT Dec Text Identity) a -- | A journal parser that runs in IO and can throw an error mid-parse. type ErroringJournalParser m a = StateT Journal (ParsecT Dec Text (ExceptT String m)) a -- | Backtracking choice, use this when alternatives share a prefix. -- Consumes no input if all choices fail. choice' :: [TextParser m a] -> TextParser m a choice' = choice . map Text.Megaparsec.try -- | Backtracking choice, use this when alternatives share a prefix. -- Consumes no input if all choices fail. choiceInState :: [StateT s (ParsecT Dec Text m) a] -> StateT s (ParsecT Dec Text m) a choiceInState = choice . 
map Text.Megaparsec.try parsewith :: Parsec e Text a -> Text -> Either (ParseError Char e) a parsewith p = runParser p "" parsewithString :: Parsec e String a -> String -> Either (ParseError Char e) a parsewithString p = runParser p "" parseWithState :: Monad m => st -> StateT st (ParsecT Dec Text m) a -> Text -> m (Either (ParseError Char Dec) a) parseWithState ctx p s = runParserT (evalStateT p ctx) "" s parseWithState' :: (Stream s, ErrorComponent e) => st -> StateT st (ParsecT e s Identity) a -> s -> (Either (ParseError (Token s) e) a) parseWithState' ctx p s = runParser (evalStateT p ctx) "" s fromparse :: (Show t, Show e) => Either (ParseError t e) a -> a fromparse = either parseerror id parseerror :: (Show t, Show e) => ParseError t e -> a parseerror e = error' $ showParseError e showParseError :: (Show t, Show e) => ParseError t e -> String showParseError e = "parse error at " ++ show e showDateParseError :: (Show t, Show e) => ParseError t e -> String showDateParseError e = printf "date parse error (%s)" (intercalate ", " $ tail $ lines $ show e) nonspace :: TextParser m Char nonspace = satisfy (not . isSpace) spacenonewline :: (Stream s, Char ~ Token s) => ParsecT Dec s m Char spacenonewline = satisfy (`elem` " \v\f\t") restofline :: TextParser m String restofline = anyChar `manyTill` newline eolof :: TextParser m () eolof = (newline >> return ()) <|> eof hledger-lib-1.2/Hledger/Utils/Regex.hs0000644000000000000000000001142413035210046015774 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {-| Easy regular expression helpers, currently based on regex-tdfa. These should: - be cross-platform, not requiring C libraries - support unicode - support extended regular expressions - support replacement, with backreferences etc. - support splitting - have mnemonic names - have simple monomorphic types - work with simple strings Regex strings are automatically compiled into regular expressions the first time they are seen, and these are cached. If you use a huge number of unique regular expressions this might lead to increased memory usage. Several functions have memoised variants (*Memo), which also trade space for time. Current limitations: - (?i) and similar are not supported -} module Hledger.Utils.Regex ( -- * type aliases Regexp ,Replacement -- * standard regex operations ,regexMatches ,regexMatchesCI ,regexReplace ,regexReplaceCI ,regexReplaceMemo ,regexReplaceCIMemo ,regexReplaceBy ,regexReplaceByCI ) where import Data.Array import Data.Char import Data.List (foldl') import Data.MemoUgly (memo) import Text.Regex.TDFA ( Regex, CompOption(..), ExecOption(..), defaultCompOpt, defaultExecOpt, makeRegexOpts, AllMatches(getAllMatches), match, (=~), MatchText ) -- import Hledger.Utils.Debug import Hledger.Utils.UTF8IOCompat (error') -- | Regular expression. Extended regular expression-ish syntax ? But does not support eg (?i) syntax. type Regexp = String -- | A replacement pattern. May include numeric backreferences (\N). type Replacement = String -- | Convert our string-based regexps to real ones. Can fail if the -- string regexp is malformed. 
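-- An illustrative usage sketch (not part of this module's API;
-- `_regexUsageSketch` is a hypothetical name): matching uses regex-tdfa's
-- POSIX ERE syntax, and the replacement pattern may contain numeric
-- backreferences (\N).
_regexUsageSketch :: (String, Bool)
_regexUsageSketch =
  ( regexReplace "([a-z]+) ([a-z]+)" "\\2 \\1" "hello world"  -- expected "world hello"
  , regexMatchesCI "ASSETS" "assets:cash"                     -- expected True
  )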
toRegex :: Regexp -> Regex toRegex = memo (makeRegexOpts compOpt execOpt) toRegexCI :: Regexp -> Regex toRegexCI = memo (makeRegexOpts compOpt{caseSensitive=False} execOpt) compOpt :: CompOption compOpt = defaultCompOpt execOpt :: ExecOption execOpt = defaultExecOpt -- regexMatch' :: RegexContext Regexp String a => Regexp -> String -> a -- regexMatch' r s = s =~ (toRegex r) regexMatches :: Regexp -> String -> Bool regexMatches = flip (=~) regexMatchesCI :: Regexp -> String -> Bool regexMatchesCI r = match (toRegexCI r) -- | Replace all occurrences of the regexp, transforming each match with the given function. regexReplaceBy :: Regexp -> (String -> String) -> String -> String regexReplaceBy r = replaceAllBy (toRegex r) regexReplaceByCI :: Regexp -> (String -> String) -> String -> String regexReplaceByCI r = replaceAllBy (toRegexCI r) -- | Replace all occurrences of the regexp with the replacement -- pattern. The replacement pattern supports numeric backreferences -- (\N) but no other RE syntax. regexReplace :: Regexp -> Replacement -> String -> String regexReplace re = replaceRegex (toRegex re) regexReplaceCI :: Regexp -> Replacement -> String -> String regexReplaceCI re = replaceRegex (toRegexCI re) -- | A memoising version of regexReplace. Caches the result for each -- search pattern, replacement pattern, target string tuple. regexReplaceMemo :: Regexp -> Replacement -> String -> String regexReplaceMemo re repl = memo (regexReplace re repl) regexReplaceCIMemo :: Regexp -> Replacement -> String -> String regexReplaceCIMemo re repl = memo (regexReplaceCI re repl) -- replaceRegex :: Regex -> Replacement -> String -> String replaceRegex re repl s = foldl (replaceMatch repl) s (reverse $ match re s :: [MatchText String]) replaceMatch :: Replacement -> String -> MatchText String -> String replaceMatch replpat s matchgroups = pre ++ repl ++ post where ((_,(off,len)):_) = elems matchgroups -- groups should have 0-based indexes, and there should always be at least one, since this is a match (pre, post') = splitAt off s post = drop len post' repl = replaceAllBy (toRegex "\\\\[0-9]+") (replaceBackReference matchgroups) replpat replaceBackReference :: MatchText String -> String -> String replaceBackReference grps ('\\':s@(_:_)) | all isDigit s = case read s of n | n `elem` indices grps -> fst (grps ! n) _ -> error' $ "no match group exists for backreference \"\\"++s++"\"" replaceBackReference _ s = error' $ "replaceBackReference called on non-numeric-backreference \""++s++"\", shouldn't happen" -- -- http://stackoverflow.com/questions/9071682/replacement-substition-with-haskell-regex-libraries : -- | Replace all occurrences of a regexp in a string, transforming each match with the given function. replaceAllBy :: Regex -> (String -> String) -> String -> String replaceAllBy re f s = start end where (_, end, start) = foldl' go (0, s, id) $ (getAllMatches $ match re s :: [(Int, Int)]) go (ind,read,write) (off,len) = let (skip, start) = splitAt (off - ind) read (matched, remaining) = splitAt len start in (off + len, remaining, write . (skip++) . (f matched ++)) hledger-lib-1.2/Hledger/Utils/String.hs0000644000000000000000000003340013035510426016173 0ustar0000000000000000-- | String formatting helpers, starting to get a bit out of control. 
module Hledger.Utils.String ( -- * misc lowercase, uppercase, underline, stripbrackets, unbracket, -- quoting quoteIfNeeded, singleQuoteIfNeeded, -- quotechars, -- whitespacechars, escapeQuotes, words', unwords', -- * single-line layout strip, lstrip, rstrip, chomp, elideLeft, elideRight, formatString, -- * multi-line layout concatTopPadded, concatBottomPadded, concatOneLine, vConcatLeftAligned, vConcatRightAligned, padtop, padbottom, padleft, padright, cliptopleft, fitto, -- * wide-character-aware layout charWidth, strWidth, takeWidth, fitString, fitStringMulti, padLeftWide, padRightWide ) where import Data.Char import Data.List import Text.Megaparsec import Text.Printf (printf) import Hledger.Utils.Parse import Hledger.Utils.Regex lowercase, uppercase :: String -> String lowercase = map toLower uppercase = map toUpper -- | Remove leading and trailing whitespace. strip :: String -> String strip = lstrip . rstrip -- | Remove leading whitespace. lstrip :: String -> String lstrip = dropWhile isSpace -- | Remove trailing whitespace. rstrip :: String -> String rstrip = reverse . lstrip . reverse -- | Remove trailing newlines/carriage returns. chomp :: String -> String chomp = reverse . dropWhile (`elem` "\r\n") . reverse stripbrackets :: String -> String stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String elideLeft :: Int -> String -> String elideLeft width s = if length s > width then ".." ++ reverse (take (width - 2) $ reverse s) else s elideRight :: Int -> String -> String elideRight width s = if length s > width then take (width - 2) s ++ ".." else s -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- Works on multi-line strings too (but will rewrite non-unix line endings). formatString :: Bool -> Maybe Int -> Maybe Int -> String -> String formatString leftJustified minwidth maxwidth s = intercalate "\n" $ map (printf fmt) $ lines s where justify = if leftJustified then "-" else "" minwidth' = maybe "" show minwidth maxwidth' = maybe "" (("."++).show) maxwidth fmt = "%" ++ justify ++ minwidth' ++ maxwidth' ++ "s" underline :: String -> String underline s = s' ++ replicate (length s) '-' ++ "\n" where s' | last s == '\n' = s | otherwise = s ++ "\n" -- | Double-quote this string if it contains whitespace, single quotes -- or double-quotes, escaping the quotes as needed. quoteIfNeeded :: String -> String quoteIfNeeded s | any (`elem` s) (quotechars++whitespacechars) = "\"" ++ escapeDoubleQuotes s ++ "\"" | otherwise = s -- | Single-quote this string if it contains whitespace or double-quotes. -- No good for strings containing single quotes. singleQuoteIfNeeded :: String -> String singleQuoteIfNeeded s | any (`elem` s) whitespacechars = "'"++s++"'" | otherwise = s quotechars, whitespacechars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" escapeDoubleQuotes :: String -> String escapeDoubleQuotes = regexReplace "\"" "\"" escapeQuotes :: String -> String escapeQuotes = regexReplace "([\"'])" "\\1" -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. 
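-- An illustrative behaviour sketch (not part of this module's API;
-- `_wordsSketch` is a hypothetical name): quoted groups survive the split,
-- and unwords' re-quotes any whitespace-containing word (with double quotes)
-- on the way back.
_wordsSketch :: Bool
_wordsSketch = words' "a 'b c' d" == ["a", "b c", "d"]
            && unwords' ["a", "b c", "d"] == "a \"b c\" d"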
words' :: String -> [String] words' "" = [] words' s = map stripquotes $ fromparse $ parsewithString p s where p = do ss <- (singleQuotedPattern <|> doubleQuotedPattern <|> pattern) `sepBy` some spacenonewline -- eof return ss pattern = many (noneOf whitespacechars) singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- | Quote-aware version of unwords - single-quote strings which contain whitespace unwords' :: [String] -> String unwords' = unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. stripquotes :: String -> String stripquotes s = if isSingleQuoted s || isDoubleQuoted s then init $ tail s else s isSingleQuoted s@(_:_:_) = head s == '\'' && last s == '\'' isSingleQuoted _ = False isDoubleQuoted s@(_:_:_) = head s == '"' && last s == '"' isDoubleQuoted _ = False unbracket :: String -> String unbracket s | (head s == '[' && last s == ']') || (head s == '(' && last s == ')') = init $ tail s | otherwise = s -- | Join several multi-line strings as side-by-side rectangular strings of the same height, top-padded. -- Treats wide characters as double width. concatTopPadded :: [String] -> String concatTopPadded strs = intercalate "\n" $ map concat $ transpose padded where lss = map lines strs h = maximum $ map length lss ypad ls = replicate (difforzero h (length ls)) "" ++ ls xpad ls = map (padLeftWide w) ls where w | null ls = 0 | otherwise = maximum $ map strWidth ls padded = map (xpad . ypad) lss -- | Join several multi-line strings as side-by-side rectangular strings of the same height, bottom-padded. -- Treats wide characters as double width. concatBottomPadded :: [String] -> String concatBottomPadded strs = intercalate "\n" $ map concat $ transpose padded where lss = map lines strs h = maximum $ map length lss ypad ls = ls ++ replicate (difforzero h (length ls)) "" xpad ls = map (padRightWide w) ls where w | null ls = 0 | otherwise = maximum $ map strWidth ls padded = map (xpad . ypad) lss -- | Join multi-line strings horizontally, after compressing each of -- them to a single line with a comma and space between each original line. concatOneLine :: [String] -> String concatOneLine strs = concat $ map ((intercalate ", ").lines) strs -- | Join strings vertically, left-aligned and right-padded. vConcatLeftAligned :: [String] -> String vConcatLeftAligned ss = intercalate "\n" $ map showfixedwidth ss where showfixedwidth = printf (printf "%%-%ds" width) width = maximum $ map length ss -- | Join strings vertically, right-aligned and left-padded. vConcatRightAligned :: [String] -> String vConcatRightAligned ss = intercalate "\n" $ map showfixedwidth ss where showfixedwidth = printf (printf "%%%ds" width) width = maximum $ map length ss -- | Convert a multi-line string to a rectangular string top-padded to the specified height. padtop :: Int -> String -> String padtop h s = intercalate "\n" xpadded where ls = lines s sh = length ls sw | null ls = 0 | otherwise = maximum $ map length ls ypadded = replicate (difforzero h sh) "" ++ ls xpadded = map (padleft sw) ypadded -- | Convert a multi-line string to a rectangular string bottom-padded to the specified height. 
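-- An illustrative behaviour sketch for the rectangular padding helpers (not
-- part of this module's API; `_padSketch` is a hypothetical name): both pad
-- with blank lines to the requested height and left-pad every line to the
-- block's width.
_padSketch :: Bool
_padSketch = padtop 3 "ab\ncd" == "  \nab\ncd"
          && padbottom 3 "ab\ncd" == "ab\ncd\n  "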
padbottom :: Int -> String -> String padbottom h s = intercalate "\n" xpadded where ls = lines s sh = length ls sw | null ls = 0 | otherwise = maximum $ map length ls ypadded = ls ++ replicate (difforzero h sh) "" xpadded = map (padleft sw) ypadded difforzero :: (Num a, Ord a) => a -> a -> a difforzero a b = maximum [(a - b), 0] -- | Convert a multi-line string to a rectangular string left-padded to the specified width. -- Treats wide characters as double width. padleft :: Int -> String -> String padleft w "" = concat $ replicate w " " padleft w s = intercalate "\n" $ map (printf (printf "%%%ds" w)) $ lines s -- | Convert a multi-line string to a rectangular string right-padded to the specified width. -- Treats wide characters as double width. padright :: Int -> String -> String padright w "" = concat $ replicate w " " padright w s = intercalate "\n" $ map (printf (printf "%%-%ds" w)) $ lines s -- | Clip a multi-line string to the specified width and height from the top left. cliptopleft :: Int -> Int -> String -> String cliptopleft w h = intercalate "\n" . take h . map (take w) . lines -- | Clip and pad a multi-line string to fill the specified width and height. fitto :: Int -> Int -> String -> String fitto w h s = intercalate "\n" $ take h $ rows ++ repeat blankline where rows = map (fit w) $ lines s fit w = take w . (++ repeat ' ') blankline = replicate w ' ' -- Functions below treat wide (eg CJK) characters as double-width. -- | General-purpose wide-char-aware single-line string layout function. -- It can left- or right-pad a short string to a minimum width. -- It can left- or right-clip a long string to a maximum width, optionally inserting an ellipsis (the third argument). -- It clips and pads on the right when the fourth argument is true, otherwise on the left. -- It treats wide characters as double width. fitString :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String fitString mminwidth mmaxwidth ellipsify rightside s = (clip . pad) s where clip :: String -> String clip s = case mmaxwidth of Just w | strWidth s > w -> case rightside of True -> takeWidth (w - length ellipsis) s ++ ellipsis False -> ellipsis ++ reverse (takeWidth (w - length ellipsis) $ reverse s) | otherwise -> s where ellipsis = if ellipsify then ".." else "" Nothing -> s pad :: String -> String pad s = case mminwidth of Just w | sw < w -> case rightside of True -> s ++ replicate (w - sw) ' ' False -> replicate (w - sw) ' ' ++ s | otherwise -> s Nothing -> s where sw = strWidth s -- | A version of fitString that works on multi-line strings, -- separate for now to avoid breakage. -- This will rewrite any line endings to unix newlines. fitStringMulti :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String fitStringMulti mminwidth mmaxwidth ellipsify rightside s = (intercalate "\n" . map (fitString mminwidth mmaxwidth ellipsify rightside) . lines) s -- | Left-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). padLeftWide :: Int -> String -> String padLeftWide w "" = replicate w ' ' padLeftWide w s = intercalate "\n" $ map (fitString (Just w) Nothing False False) $ lines s -- XXX not yet replaceable by -- padLeftWide w = fitStringMulti (Just w) Nothing False False -- | Right-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). 
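-- An illustrative behaviour sketch for the wide-character-aware helpers (not
-- part of this module's API; `_fitSketch` is a hypothetical name): the
-- optional ".." ellipsis is counted within the maximum width, and CJK
-- characters count as two columns when padding.
_fitSketch :: Bool
_fitSketch = fitString (Just 6) (Just 6) True True "hello world" == "hell.."
          && padRightWide 5 "あ" == "あ   "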
padRightWide :: Int -> String -> String padRightWide w "" = replicate w ' ' padRightWide w s = intercalate "\n" $ map (fitString (Just w) Nothing False True) $ lines s -- XXX not yet replaceable by -- padRightWide w = fitStringMulti (Just w) Nothing False True -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg takeWidth 3 "りんご" = "り". takeWidth :: Int -> String -> String takeWidth _ "" = "" takeWidth 0 _ = "" takeWidth w (c:cs) | cw <= w = c:takeWidth (w-cw) cs | otherwise = "" where cw = charWidth c -- from Pandoc (copyright John MacFarlane, GPL) -- see also http://unicode.org/reports/tr11/#Description -- | Calculate the designated render width of a string, taking into -- account wide characters and line breaks (the longest line within a -- multi-line string determines the width ). strWidth :: String -> Int strWidth "" = 0 strWidth s = maximum $ map (foldr (\a b -> charWidth a + b) 0) $ lines s -- | Get the designated render width of a character: 0 for a combining -- character, 1 for a regular character, 2 for a wide character. -- (Wide characters are rendered as exactly double width in apps and -- fonts that support it.) (From Pandoc.) charWidth :: Char -> Int charWidth c = case c of _ | c < '\x0300' -> 1 | c >= '\x0300' && c <= '\x036F' -> 0 -- combining | c >= '\x0370' && c <= '\x10FC' -> 1 | c >= '\x1100' && c <= '\x115F' -> 2 | c >= '\x1160' && c <= '\x11A2' -> 1 | c >= '\x11A3' && c <= '\x11A7' -> 2 | c >= '\x11A8' && c <= '\x11F9' -> 1 | c >= '\x11FA' && c <= '\x11FF' -> 2 | c >= '\x1200' && c <= '\x2328' -> 1 | c >= '\x2329' && c <= '\x232A' -> 2 | c >= '\x232B' && c <= '\x2E31' -> 1 | c >= '\x2E80' && c <= '\x303E' -> 2 | c == '\x303F' -> 1 | c >= '\x3041' && c <= '\x3247' -> 2 | c >= '\x3248' && c <= '\x324F' -> 1 -- ambiguous | c >= '\x3250' && c <= '\x4DBF' -> 2 | c >= '\x4DC0' && c <= '\x4DFF' -> 1 | c >= '\x4E00' && c <= '\xA4C6' -> 2 | c >= '\xA4D0' && c <= '\xA95F' -> 1 | c >= '\xA960' && c <= '\xA97C' -> 2 | c >= '\xA980' && c <= '\xABF9' -> 1 | c >= '\xAC00' && c <= '\xD7FB' -> 2 | c >= '\xD800' && c <= '\xDFFF' -> 1 | c >= '\xE000' && c <= '\xF8FF' -> 1 -- ambiguous | c >= '\xF900' && c <= '\xFAFF' -> 2 | c >= '\xFB00' && c <= '\xFDFD' -> 1 | c >= '\xFE00' && c <= '\xFE0F' -> 1 -- ambiguous | c >= '\xFE10' && c <= '\xFE19' -> 2 | c >= '\xFE20' && c <= '\xFE26' -> 1 | c >= '\xFE30' && c <= '\xFE6B' -> 2 | c >= '\xFE70' && c <= '\xFEFF' -> 1 | c >= '\xFF01' && c <= '\xFF60' -> 2 | c >= '\xFF61' && c <= '\x16A38' -> 1 | c >= '\x1B000' && c <= '\x1B001' -> 2 | c >= '\x1D000' && c <= '\x1F1FF' -> 1 | c >= '\x1F200' && c <= '\x1F251' -> 2 | c >= '\x1F300' && c <= '\x1F773' -> 1 | c >= '\x20000' && c <= '\x3FFFD' -> 2 | otherwise -> 1 hledger-lib-1.2/Hledger/Utils/Test.hs0000644000000000000000000000332113035210046015636 0ustar0000000000000000module Hledger.Utils.Test where import Test.HUnit import Text.Megaparsec -- | Get a Test's label, or the empty string. testName :: Test -> String testName (TestLabel n _) = n testName _ = "" -- | Flatten a Test containing TestLists into a list of single tests. flattenTests :: Test -> [Test] flattenTests (TestLabel _ t@(TestList _)) = flattenTests t flattenTests (TestList ts) = concatMap flattenTests ts flattenTests t = [t] -- | Filter TestLists in a Test, recursively, preserving the structure. 
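-- An illustrative usage sketch (not part of this module's API;
-- `_filterTestsSketch` and its labels are hypothetical): keep only the
-- labelled tests satisfying the predicate, along with the list structure
-- that encloses them.
_filterTestsSketch :: Test
_filterTestsSketch = filterTests (("amounts" ==) . testName) suite
  where
    suite = TestList
      [ TestLabel "amounts" $ TestCase $ (1 + 1) `is` 2
      , TestLabel "dates"   $ TestCase $ "a" `is` "a"
      ]
    -- expected result: a TestList containing only the "amounts" test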
filterTests :: (Test -> Bool) -> Test -> Test filterTests p (TestLabel l ts) = TestLabel l (filterTests p ts) filterTests p (TestList ts) = TestList $ filter (any p . flattenTests) $ map (filterTests p) ts filterTests _ t = t -- | Simple way to assert something is some expected value, with no label. is :: (Eq a, Show a) => a -> a -> Assertion a `is` e = assertEqual "" e a -- | Assert a parse result is successful, printing the parse error on failure. assertParse :: (Show t, Show e) => (Either (ParseError t e) a) -> Assertion assertParse parse = either (assertFailure.show) (const (return ())) parse -- | Assert a parse result is successful, printing the parse error on failure. assertParseFailure :: (Either (ParseError t e) a) -> Assertion assertParseFailure parse = either (const $ return ()) (const $ assertFailure "parse should not have succeeded") parse -- | Assert a parse result is some expected value, printing the parse error on failure. assertParseEqual :: (Show a, Eq a, Show t, Show e) => (Either (ParseError t e) a) -> a -> Assertion assertParseEqual parse expected = either (assertFailure.show) (`is` expected) parse printParseError :: (Show a) => a -> IO () printParseError e = do putStr "parse error at "; print e hledger-lib-1.2/Hledger/Utils/Text.hs0000644000000000000000000003754613035210046015663 0ustar0000000000000000-- | Text formatting helpers, ported from String as needed. -- There may be better alternatives out there. {-# LANGUAGE OverloadedStrings #-} module Hledger.Utils.Text -- ( -- -- * misc -- lowercase, -- uppercase, -- underline, -- stripbrackets, -- unbracket, -- -- quoting -- quoteIfSpaced, -- quoteIfNeeded, -- singleQuoteIfNeeded, -- -- quotechars, -- -- whitespacechars, -- escapeDoubleQuotes, -- escapeSingleQuotes, -- escapeQuotes, -- words', -- unwords', -- stripquotes, -- isSingleQuoted, -- isDoubleQuoted, -- -- * single-line layout -- strip, -- lstrip, -- rstrip, -- chomp, -- elideLeft, -- elideRight, -- formatString, -- -- * multi-line layout -- concatTopPadded, -- concatBottomPadded, -- concatOneLine, -- vConcatLeftAligned, -- vConcatRightAligned, -- padtop, -- padbottom, -- padleft, -- padright, -- cliptopleft, -- fitto, -- -- * wide-character-aware layout -- strWidth, -- textTakeWidth, -- fitString, -- fitStringMulti, -- padLeftWide, -- padRightWide -- ) where -- import Data.Char import Data.List import Data.Monoid import Data.Text (Text) import qualified Data.Text as T -- import Text.Parsec -- import Text.Printf (printf) -- import Hledger.Utils.Parse -- import Hledger.Utils.Regex import Hledger.Utils.String (charWidth) -- lowercase, uppercase :: String -> String -- lowercase = map toLower -- uppercase = map toUpper -- | Remove leading and trailing whitespace. textstrip :: Text -> Text textstrip = textlstrip . textrstrip -- | Remove leading whitespace. textlstrip :: Text -> Text textlstrip = T.dropWhile (`elem` (" \t" :: String)) :: Text -> Text -- XXX isSpace ? -- | Remove trailing whitespace. textrstrip = T.reverse . textlstrip . T.reverse textrstrip :: Text -> Text -- -- | Remove trailing newlines/carriage returns. -- chomp :: String -> String -- chomp = reverse . dropWhile (`elem` "\r\n") . reverse -- stripbrackets :: String -> String -- stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String -- elideLeft :: Int -> String -> String -- elideLeft width s = -- if length s > width then ".." 
++ reverse (take (width - 2) $ reverse s) else s textElideRight :: Int -> Text -> Text textElideRight width t = if T.length t > width then T.take (width - 2) t <> ".." else t -- -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- -- Works on multi-line strings too (but will rewrite non-unix line endings). -- formatString :: Bool -> Maybe Int -> Maybe Int -> String -> String -- formatString leftJustified minwidth maxwidth s = intercalate "\n" $ map (printf fmt) $ lines s -- where -- justify = if leftJustified then "-" else "" -- minwidth' = maybe "" show minwidth -- maxwidth' = maybe "" (("."++).show) maxwidth -- fmt = "%" ++ justify ++ minwidth' ++ maxwidth' ++ "s" -- underline :: String -> String -- underline s = s' ++ replicate (length s) '-' ++ "\n" -- where s' -- | last s == '\n' = s -- | otherwise = s ++ "\n" -- | Wrap a string in double quotes, and \-prefix any embedded single -- quotes, if it contains whitespace and is not already single- or -- double-quoted. quoteIfSpaced :: T.Text -> T.Text quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s | not $ any (`elem` (T.unpack s)) whitespacechars = s | otherwise = "'"<>escapeSingleQuotes s<>"'" -- -- | Wrap a string in double quotes, and \-prefix any embedded single -- -- quotes, if it contains whitespace and is not already single- or -- -- double-quoted. -- quoteIfSpaced :: String -> String -- quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s -- | not $ any (`elem` s) whitespacechars = s -- | otherwise = "'"++escapeSingleQuotes s++"'" -- -- | Double-quote this string if it contains whitespace, single quotes -- -- or double-quotes, escaping the quotes as needed. -- quoteIfNeeded :: T.Text -> T.Text -- quoteIfNeeded s | any (`elem` T.unpack s) (quotechars++whitespacechars) = "\"" <> escapeDoubleQuotes s <> "\"" -- | otherwise = s -- -- | Single-quote this string if it contains whitespace or double-quotes. -- -- No good for strings containing single quotes. -- singleQuoteIfNeeded :: String -> String -- singleQuoteIfNeeded s | any (`elem` s) whitespacechars = "'"++s++"'" -- | otherwise = s quotechars, whitespacechars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" escapeDoubleQuotes :: T.Text -> T.Text escapeDoubleQuotes = T.replace "\"" "\"" escapeSingleQuotes :: T.Text -> T.Text escapeSingleQuotes = T.replace "'" "\'" -- escapeQuotes :: String -> String -- escapeQuotes = regexReplace "([\"'])" "\\1" -- -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. -- words' :: String -> [String] -- words' "" = [] -- words' s = map stripquotes $ fromparse $ parsewith p s -- where -- p = do ss <- (singleQuotedPattern <|> doubleQuotedPattern <|> pattern) `sepBy` many1 spacenonewline -- -- eof -- return ss -- pattern = many (noneOf whitespacechars) -- singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") -- doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- -- | Quote-aware version of unwords - single-quote strings which contain whitespace -- unwords' :: [Text] -> Text -- unwords' = T.unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. 
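-- An illustrative behaviour sketch (not part of this module's API;
-- `_quoteSketch` is a hypothetical name): stripquotes removes only a
-- matching pair of outer quotes, and quoteIfSpaced single-quotes text
-- containing whitespace.
_quoteSketch :: Bool
_quoteSketch = stripquotes "'b c'" == "b c"
            && stripquotes "hi'" == "hi'"
            && quoteIfSpaced "b c" == "'b c'"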
stripquotes :: Text -> Text stripquotes s = if isSingleQuoted s || isDoubleQuoted s then T.init $ T.tail s else s isSingleQuoted :: Text -> Bool isSingleQuoted s = T.length (T.take 2 s) == 2 && T.head s == '\'' && T.last s == '\'' isDoubleQuoted :: Text -> Bool isDoubleQuoted s = T.length (T.take 2 s) == 2 && T.head s == '"' && T.last s == '"' textUnbracket :: Text -> Text textUnbracket s | (T.head s == '[' && T.last s == ']') || (T.head s == '(' && T.last s == ')') = T.init $ T.tail s | otherwise = s -- | Join several multi-line strings as side-by-side rectangular strings of the same height, top-padded. -- Treats wide characters as double width. textConcatTopPadded :: [Text] -> Text textConcatTopPadded ts = T.intercalate "\n" $ map T.concat $ transpose padded where lss = map T.lines ts :: [[Text]] h = maximum $ map length lss ypad ls = replicate (difforzero h (length ls)) "" ++ ls xpad ls = map (textPadLeftWide w) ls where w | null ls = 0 | otherwise = maximum $ map textWidth ls padded = map (xpad . ypad) lss :: [[Text]] -- -- | Join several multi-line strings as side-by-side rectangular strings of the same height, bottom-padded. -- -- Treats wide characters as double width. -- concatBottomPadded :: [String] -> String -- concatBottomPadded strs = intercalate "\n" $ map concat $ transpose padded -- where -- lss = map lines strs -- h = maximum $ map length lss -- ypad ls = ls ++ replicate (difforzero h (length ls)) "" -- xpad ls = map (padRightWide w) ls where w | null ls = 0 -- | otherwise = maximum $ map strWidth ls -- padded = map (xpad . ypad) lss -- -- | Join multi-line strings horizontally, after compressing each of -- -- them to a single line with a comma and space between each original line. -- concatOneLine :: [String] -> String -- concatOneLine strs = concat $ map ((intercalate ", ").lines) strs -- -- | Join strings vertically, left-aligned and right-padded. -- vConcatLeftAligned :: [String] -> String -- vConcatLeftAligned ss = intercalate "\n" $ map showfixedwidth ss -- where -- showfixedwidth = printf (printf "%%-%ds" width) -- width = maximum $ map length ss -- -- | Join strings vertically, right-aligned and left-padded. -- vConcatRightAligned :: [String] -> String -- vConcatRightAligned ss = intercalate "\n" $ map showfixedwidth ss -- where -- showfixedwidth = printf (printf "%%%ds" width) -- width = maximum $ map length ss -- -- | Convert a multi-line string to a rectangular string top-padded to the specified height. -- padtop :: Int -> String -> String -- padtop h s = intercalate "\n" xpadded -- where -- ls = lines s -- sh = length ls -- sw | null ls = 0 -- | otherwise = maximum $ map length ls -- ypadded = replicate (difforzero h sh) "" ++ ls -- xpadded = map (padleft sw) ypadded -- -- | Convert a multi-line string to a rectangular string bottom-padded to the specified height. -- padbottom :: Int -> String -> String -- padbottom h s = intercalate "\n" xpadded -- where -- ls = lines s -- sh = length ls -- sw | null ls = 0 -- | otherwise = maximum $ map length ls -- ypadded = ls ++ replicate (difforzero h sh) "" -- xpadded = map (padleft sw) ypadded difforzero :: (Num a, Ord a) => a -> a -> a difforzero a b = maximum [(a - b), 0] -- -- | Convert a multi-line string to a rectangular string left-padded to the specified width. -- -- Treats wide characters as double width. 
-- padleft :: Int -> String -> String -- padleft w "" = concat $ replicate w " " -- padleft w s = intercalate "\n" $ map (printf (printf "%%%ds" w)) $ lines s -- -- | Convert a multi-line string to a rectangular string right-padded to the specified width. -- -- Treats wide characters as double width. -- padright :: Int -> String -> String -- padright w "" = concat $ replicate w " " -- padright w s = intercalate "\n" $ map (printf (printf "%%-%ds" w)) $ lines s -- -- | Clip a multi-line string to the specified width and height from the top left. -- cliptopleft :: Int -> Int -> String -> String -- cliptopleft w h = intercalate "\n" . take h . map (take w) . lines -- -- | Clip and pad a multi-line string to fill the specified width and height. -- fitto :: Int -> Int -> String -> String -- fitto w h s = intercalate "\n" $ take h $ rows ++ repeat blankline -- where -- rows = map (fit w) $ lines s -- fit w = take w . (++ repeat ' ') -- blankline = replicate w ' ' -- -- Functions below treat wide (eg CJK) characters as double-width. -- | General-purpose wide-char-aware single-line text layout function. -- It can left- or right-pad a short string to a minimum width. -- It can left- or right-clip a long string to a maximum width, optionally inserting an ellipsis (the third argument). -- It clips and pads on the right when the fourth argument is true, otherwise on the left. -- It treats wide characters as double width. fitText :: Maybe Int -> Maybe Int -> Bool -> Bool -> Text -> Text fitText mminwidth mmaxwidth ellipsify rightside s = (clip . pad) s where clip :: Text -> Text clip s = case mmaxwidth of Just w | textWidth s > w -> case rightside of True -> textTakeWidth (w - T.length ellipsis) s <> ellipsis False -> ellipsis <> T.reverse (textTakeWidth (w - T.length ellipsis) $ T.reverse s) | otherwise -> s where ellipsis = if ellipsify then ".." else "" Nothing -> s pad :: Text -> Text pad s = case mminwidth of Just w | sw < w -> case rightside of True -> s <> T.replicate (w - sw) " " False -> T.replicate (w - sw) " " <> s | otherwise -> s Nothing -> s where sw = textWidth s -- -- | A version of fitString that works on multi-line strings, -- -- separate for now to avoid breakage. -- -- This will rewrite any line endings to unix newlines. -- fitStringMulti :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String -- fitStringMulti mminwidth mmaxwidth ellipsify rightside s = -- (intercalate "\n" . map (fitString mminwidth mmaxwidth ellipsify rightside) . lines) s -- | Left-pad a text to the specified width. -- Treats wide characters as double width. -- Works on multi-line texts too (but will rewrite non-unix line endings). textPadLeftWide :: Int -> Text -> Text textPadLeftWide w "" = T.replicate w " " textPadLeftWide w s = T.intercalate "\n" $ map (fitText (Just w) Nothing False False) $ T.lines s -- XXX not yet replaceable by -- padLeftWide w = fitStringMulti (Just w) Nothing False False -- | Right-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). textPadRightWide :: Int -> Text -> Text textPadRightWide w "" = T.replicate w " " textPadRightWide w s = T.intercalate "\n" $ map (fitText (Just w) Nothing False True) $ T.lines s -- XXX not yet replaceable by -- padRightWide w = fitStringMulti (Just w) Nothing False True -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg textTakeWidth 3 "りんご" = "り". 
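-- An illustrative behaviour sketch (not part of this module's API;
-- `_widthSketch` is a hypothetical name): wide (eg CJK) characters count as
-- two columns, and fitText's optional ".." ellipsis is included within the
-- maximum width.
_widthSketch :: Bool
_widthSketch = textWidth "りんご" == 6
            && fitText (Just 6) (Just 6) True True "hello world" == "hell.."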
textTakeWidth :: Int -> Text -> Text textTakeWidth _ "" = "" textTakeWidth 0 _ = "" textTakeWidth w t | not (T.null t), let c = T.head t, let cw = charWidth c, cw <= w = T.cons c $ textTakeWidth (w-cw) (T.tail t) | otherwise = "" -- -- from Pandoc (copyright John MacFarlane, GPL) -- -- see also http://unicode.org/reports/tr11/#Description -- | Calculate the designated render width of a string, taking into -- account wide characters and line breaks (the longest line within a -- multi-line string determines the width ). textWidth :: Text -> Int textWidth "" = 0 textWidth s = maximum $ map (T.foldr (\a b -> charWidth a + b) 0) $ T.lines s -- -- | Get the designated render width of a character: 0 for a combining -- -- character, 1 for a regular character, 2 for a wide character. -- -- (Wide characters are rendered as exactly double width in apps and -- -- fonts that support it.) (From Pandoc.) -- charWidth :: Char -> Int -- charWidth c = -- case c of -- _ | c < '\x0300' -> 1 -- | c >= '\x0300' && c <= '\x036F' -> 0 -- combining -- | c >= '\x0370' && c <= '\x10FC' -> 1 -- | c >= '\x1100' && c <= '\x115F' -> 2 -- | c >= '\x1160' && c <= '\x11A2' -> 1 -- | c >= '\x11A3' && c <= '\x11A7' -> 2 -- | c >= '\x11A8' && c <= '\x11F9' -> 1 -- | c >= '\x11FA' && c <= '\x11FF' -> 2 -- | c >= '\x1200' && c <= '\x2328' -> 1 -- | c >= '\x2329' && c <= '\x232A' -> 2 -- | c >= '\x232B' && c <= '\x2E31' -> 1 -- | c >= '\x2E80' && c <= '\x303E' -> 2 -- | c == '\x303F' -> 1 -- | c >= '\x3041' && c <= '\x3247' -> 2 -- | c >= '\x3248' && c <= '\x324F' -> 1 -- ambiguous -- | c >= '\x3250' && c <= '\x4DBF' -> 2 -- | c >= '\x4DC0' && c <= '\x4DFF' -> 1 -- | c >= '\x4E00' && c <= '\xA4C6' -> 2 -- | c >= '\xA4D0' && c <= '\xA95F' -> 1 -- | c >= '\xA960' && c <= '\xA97C' -> 2 -- | c >= '\xA980' && c <= '\xABF9' -> 1 -- | c >= '\xAC00' && c <= '\xD7FB' -> 2 -- | c >= '\xD800' && c <= '\xDFFF' -> 1 -- | c >= '\xE000' && c <= '\xF8FF' -> 1 -- ambiguous -- | c >= '\xF900' && c <= '\xFAFF' -> 2 -- | c >= '\xFB00' && c <= '\xFDFD' -> 1 -- | c >= '\xFE00' && c <= '\xFE0F' -> 1 -- ambiguous -- | c >= '\xFE10' && c <= '\xFE19' -> 2 -- | c >= '\xFE20' && c <= '\xFE26' -> 1 -- | c >= '\xFE30' && c <= '\xFE6B' -> 2 -- | c >= '\xFE70' && c <= '\xFEFF' -> 1 -- | c >= '\xFF01' && c <= '\xFF60' -> 2 -- | c >= '\xFF61' && c <= '\x16A38' -> 1 -- | c >= '\x1B000' && c <= '\x1B001' -> 2 -- | c >= '\x1D000' && c <= '\x1F1FF' -> 1 -- | c >= '\x1F200' && c <= '\x1F251' -> 2 -- | c >= '\x1F300' && c <= '\x1F773' -> 1 -- | c >= '\x20000' && c <= '\x3FFFD' -> 2 -- | otherwise -> 1 hledger-lib-1.2/Hledger/Utils/Tree.hs0000644000000000000000000000525613035210046015627 0ustar0000000000000000module Hledger.Utils.Tree where -- import Data.Char import Data.List (foldl') import qualified Data.Map as M import Data.Tree -- import Text.Megaparsec -- import Text.Printf import Hledger.Utils.Regex -- import Hledger.Utils.UTF8IOCompat (error') -- standard tree helpers root = rootLabel subs = subForest branches = subForest -- | List just the leaf nodes of a tree leaves :: Tree a -> [a] leaves (Node v []) = [v] leaves (Node _ branches) = concatMap leaves branches -- | get the sub-tree rooted at the first (left-most, depth-first) occurrence -- of the specified node value subtreeat :: Eq a => a -> Tree a -> Maybe (Tree a) subtreeat v t | root t == v = Just t | otherwise = subtreeinforest v $ subs t -- | get the sub-tree for the specified node value in the first tree in -- forest in which it occurs. 
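-- A small illustrative sketch of the FastTree helpers defined below (not
-- part of this module's API; `_fastTreeSketch` is a hypothetical name):
-- building a tree from paths is a fold of mergeTrees over singleton-path
-- trees.
_fastTreeSketch :: Bool
_fastTreeSketch =
  treeFromPaths [["a","b"], ["a","c"]]
    == T (M.fromList [("a", T (M.fromList [("b", emptyTree), ("c", emptyTree)]))])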
subtreeinforest :: Eq a => a -> [Tree a] -> Maybe (Tree a) subtreeinforest _ [] = Nothing subtreeinforest v (t:ts) = case (subtreeat v t) of Just t' -> Just t' Nothing -> subtreeinforest v ts -- | remove all nodes past a certain depth treeprune :: Int -> Tree a -> Tree a treeprune 0 t = Node (root t) [] treeprune d t = Node (root t) (map (treeprune $ d-1) $ branches t) -- | apply f to all tree nodes treemap :: (a -> b) -> Tree a -> Tree b treemap f t = Node (f $ root t) (map (treemap f) $ branches t) -- | remove all subtrees whose nodes do not fulfill predicate treefilter :: (a -> Bool) -> Tree a -> Tree a treefilter f t = Node (root t) (map (treefilter f) $ filter (treeany f) $ branches t) -- | is predicate true in any node of tree ? treeany :: (a -> Bool) -> Tree a -> Bool treeany f t = f (root t) || any (treeany f) (branches t) -- treedrop -- remove the leaves which do fulfill predicate. -- treedropall -- do this repeatedly. -- | show a compact ascii representation of a tree showtree :: Show a => Tree a -> String showtree = unlines . filter (regexMatches "[^ \\|]") . lines . drawTree . treemap show -- | show a compact ascii representation of a forest showforest :: Show a => Forest a -> String showforest = concatMap showtree -- | An efficient-to-build tree suggested by Cale Gibbard, probably -- better than accountNameTreeFrom. newtype FastTree a = T (M.Map a (FastTree a)) deriving (Show, Eq, Ord) emptyTree = T M.empty mergeTrees :: (Ord a) => FastTree a -> FastTree a -> FastTree a mergeTrees (T m) (T m') = T (M.unionWith mergeTrees m m') treeFromPath :: [a] -> FastTree a treeFromPath [] = T M.empty treeFromPath (x:xs) = T (M.singleton x (treeFromPath xs)) treeFromPaths :: (Ord a) => [[a]] -> FastTree a treeFromPaths = foldl' mergeTrees emptyTree . map treeFromPath hledger-lib-1.2/Hledger/Utils/UTF8IOCompat.hs0000644000000000000000000001024513066746043017063 0ustar0000000000000000{-# LANGUAGE CPP #-} {- | UTF-8 aware string IO functions that will work across multiple platforms and GHC versions. Includes code from Text.Pandoc.UTF8 ((C) 2010 John MacFarlane). Example usage: import Prelude hiding (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import UTF8IOCompat (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import UTF8IOCompat (SystemString,fromSystemString,toSystemString,error',userError') 2013/4/10 update: we now trust that current GHC versions & platforms do the right thing, so this file is a no-op and on its way to being removed. Not carefully tested. -} -- TODO obsolete ? module Hledger.Utils.UTF8IOCompat ( readFile, writeFile, appendFile, getContents, hGetContents, putStr, putStrLn, hPutStr, hPutStrLn, -- SystemString, fromSystemString, toSystemString, error', userError', usageError, ) where -- import Control.Monad (liftM) -- import qualified Data.ByteString.Lazy as B -- import qualified Data.ByteString.Lazy.Char8 as B8 -- import qualified Data.ByteString.Lazy.UTF8 as U8 (toString, fromString) import Prelude hiding (readFile, writeFile, appendFile, getContents, putStr, putStrLn) import System.IO -- (Handle) -- #if __GLASGOW_HASKELL__ < 702 -- import Codec.Binary.UTF8.String as UTF8 (decodeString, encodeString, isUTF8Encoded) -- import System.Info (os) -- #endif -- bom :: B.ByteString -- bom = B.pack [0xEF, 0xBB, 0xBF] -- stripBOM :: B.ByteString -> B.ByteString -- stripBOM s | bom `B.isPrefixOf` s = B.drop 3 s -- stripBOM s = s -- readFile :: FilePath -> IO String -- readFile = liftM (U8.toString . stripBOM) . 
B.readFile -- writeFile :: FilePath -> String -> IO () -- writeFile f = B.writeFile f . U8.fromString -- appendFile :: FilePath -> String -> IO () -- appendFile f = B.appendFile f . U8.fromString -- getContents :: IO String -- getContents = liftM (U8.toString . stripBOM) B.getContents -- hGetContents :: Handle -> IO String -- hGetContents h = liftM (U8.toString . stripBOM) (B.hGetContents h) -- putStr :: String -> IO () -- putStr = bs_putStr . U8.fromString -- putStrLn :: String -> IO () -- putStrLn = bs_putStrLn . U8.fromString -- hPutStr :: Handle -> String -> IO () -- hPutStr h = bs_hPutStr h . U8.fromString -- hPutStrLn :: Handle -> String -> IO () -- hPutStrLn h = bs_hPutStrLn h . U8.fromString -- -- span GHC versions including 6.12.3 - 7.4.1: -- bs_putStr = B8.putStr -- bs_putStrLn = B8.putStrLn -- bs_hPutStr = B8.hPut -- bs_hPutStrLn h bs = B8.hPut h bs >> B8.hPut h (B.singleton 0x0a) -- | A string received from or being passed to the operating system, such -- as a file path, command-line argument, or environment variable name or -- value. With GHC versions before 7.2 on some platforms (posix) these are -- typically encoded. When converting, we assume the encoding is UTF-8 (cf -- ). type SystemString = String -- | Convert a system string to an ordinary string, decoding from UTF-8 if -- it appears to be UTF8-encoded and GHC version is less than 7.2. fromSystemString :: SystemString -> String -- #if __GLASGOW_HASKELL__ < 702 -- fromSystemString s = if UTF8.isUTF8Encoded s then UTF8.decodeString s else s -- #else fromSystemString = id -- #endif -- | Convert a unicode string to a system string, encoding with UTF-8 if -- we are on a posix platform with GHC < 7.2. toSystemString :: String -> SystemString -- #if __GLASGOW_HASKELL__ < 702 -- toSystemString = case os of -- "unix" -> UTF8.encodeString -- "linux" -> UTF8.encodeString -- "darwin" -> UTF8.encodeString -- _ -> id -- #else toSystemString = id -- #endif -- | A SystemString-aware version of error. error' :: String -> a error' = #if __GLASGOW_HASKELL__ < 800 -- (easier than if base < 4.9) error . toSystemString #else errorWithoutStackTrace . toSystemString #endif -- | A SystemString-aware version of userError. userError' :: String -> IOError userError' = userError . toSystemString -- | A SystemString-aware version of error that adds a usage hint. usageError :: String -> a usageError = error' . (++ " (use -h to see usage)") hledger-lib-1.2/tests/hunittests.hs0000644000000000000000000000031713035210046015623 0ustar0000000000000000import Hledger (tests_Hledger) import Test.Framework.Providers.HUnit (hUnitTestToTests) import Test.Framework.Runners.Console (defaultMain) main :: IO () main = defaultMain $ hUnitTestToTests tests_Hledger hledger-lib-1.2/Hledger.hs0000644000000000000000000000066013035210046013622 0ustar0000000000000000module Hledger ( module X ,tests_Hledger ) where import Test.HUnit import Hledger.Data as X import Hledger.Query as X import Hledger.Read as X hiding (samplejournal) import Hledger.Reports as X import Hledger.Utils as X tests_Hledger = TestList [ tests_Hledger_Data ,tests_Hledger_Query ,tests_Hledger_Read ,tests_Hledger_Reports ] hledger-lib-1.2/Hledger/Data.hs0000644000000000000000000000372413066173044014511 0ustar0000000000000000{-| The Hledger.Data library allows parsing and querying of C++ ledger-style journal files. It generally provides a compatible subset of C++ ledger's functionality. This package re-exports all the Hledger.Data.* modules (except UTF8, which requires an explicit import.) 
-} module Hledger.Data ( module Hledger.Data.Account, module Hledger.Data.AccountName, module Hledger.Data.Amount, module Hledger.Data.Commodity, module Hledger.Data.Dates, module Hledger.Data.Journal, module Hledger.Data.Ledger, module Hledger.Data.MarketPrice, module Hledger.Data.Period, module Hledger.Data.Posting, module Hledger.Data.RawOptions, module Hledger.Data.StringFormat, module Hledger.Data.Timeclock, module Hledger.Data.Transaction, module Hledger.Data.Types, tests_Hledger_Data ) where import Test.HUnit import Hledger.Data.Account import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Commodity import Hledger.Data.Dates import Hledger.Data.Journal import Hledger.Data.Ledger import Hledger.Data.MarketPrice import Hledger.Data.Period import Hledger.Data.Posting import Hledger.Data.RawOptions import Hledger.Data.StringFormat import Hledger.Data.Timeclock import Hledger.Data.Transaction import Hledger.Data.Types tests_Hledger_Data :: Test tests_Hledger_Data = TestList [ tests_Hledger_Data_Account ,tests_Hledger_Data_AccountName ,tests_Hledger_Data_Amount ,tests_Hledger_Data_Commodity ,tests_Hledger_Data_Journal ,tests_Hledger_Data_MarketPrice ,tests_Hledger_Data_Ledger ,tests_Hledger_Data_Posting -- ,tests_Hledger_Data_RawOptions -- ,tests_Hledger_Data_StringFormat ,tests_Hledger_Data_Timeclock ,tests_Hledger_Data_Transaction -- ,tests_Hledger_Data_Types ] hledger-lib-1.2/Hledger/Data/Account.hs0000644000000000000000000001606313042200120016061 0ustar0000000000000000{-# LANGUAGE RecordWildCards, StandaloneDeriving, OverloadedStrings #-} {-| An 'Account' has a name, a list of subaccounts, an optional parent account, and subaccounting-excluding and -including balances. -} module Hledger.Data.Account where import Data.List import Data.Maybe import qualified Data.Map as M import Safe (headMay, lookupJustDef) import Test.HUnit import Text.Printf import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Posting() import Hledger.Data.Types import Hledger.Utils -- deriving instance Show Account instance Show Account where show Account{..} = printf "Account %s (boring:%s, postings:%d, ebalance:%s, ibalance:%s)" aname (if aboring then "y" else "n" :: String) anumpostings (showMixedAmount aebalance) (showMixedAmount aibalance) instance Eq Account where (==) a b = aname a == aname b -- quick equality test for speed -- and -- [ aname a == aname b -- -- , aparent a == aparent b -- avoid infinite recursion -- , asubs a == asubs b -- , aebalance a == aebalance b -- , aibalance a == aibalance b -- ] nullacct = Account { aname = "" , aparent = Nothing , asubs = [] , anumpostings = 0 , aebalance = nullmixedamt , aibalance = nullmixedamt , aboring = False } -- | Derive 1. an account tree and 2. each account's total exclusive -- and inclusive changes from a list of postings. -- This is the core of the balance command (and of *ledger). -- The accounts are returned as a list in flattened tree order, -- and also reference each other as a tree. -- (The first account is the root of the tree.) accountsFromPostings :: [Posting] -> [Account] accountsFromPostings ps = let acctamts = [(paccount p,pamount p) | p <- ps] grouped = groupBy (\a b -> fst a == fst b) $ sort $ acctamts counted = [(a, length acctamts) | acctamts@((a,_):_) <- grouped] summed = map (\as@((aname,_):_) -> (aname, sumStrict $ map snd as)) grouped -- always non-empty nametree = treeFromPaths $ map (expandAccountName . 
fst) summed acctswithnames = nameTreeToAccount "root" nametree acctswithnumps = mapAccounts setnumps acctswithnames where setnumps a = a{anumpostings=fromMaybe 0 $ lookup (aname a) counted} acctswithebals = mapAccounts setebalance acctswithnumps where setebalance a = a{aebalance=lookupJustDef nullmixedamt (aname a) summed} acctswithibals = sumAccounts acctswithebals acctswithparents = tieAccountParents acctswithibals acctsflattened = flattenAccounts acctswithparents in acctsflattened -- | Convert an AccountName tree to an Account tree nameTreeToAccount :: AccountName -> FastTree AccountName -> Account nameTreeToAccount rootname (T m) = nullacct{ aname=rootname, asubs=map (uncurry nameTreeToAccount) $ M.assocs m } -- | Tie the knot so all subaccounts' parents are set correctly. tieAccountParents :: Account -> Account tieAccountParents = tie Nothing where tie parent a@Account{..} = a' where a' = a{aparent=parent, asubs=map (tie (Just a')) asubs} -- | Get this account's parent accounts, from the nearest up to the root. parentAccounts :: Account -> [Account] parentAccounts Account{aparent=Nothing} = [] parentAccounts Account{aparent=Just a} = a:parentAccounts a -- | List the accounts at each level of the account tree. accountsLevels :: Account -> [[Account]] accountsLevels = takeWhile (not . null) . iterate (concatMap asubs) . (:[]) -- | Map a (non-tree-structure-modifying) function over this and sub accounts. mapAccounts :: (Account -> Account) -> Account -> Account mapAccounts f a = f a{asubs = map (mapAccounts f) $ asubs a} -- | Is the predicate true on any of this account or its subaccounts ? anyAccounts :: (Account -> Bool) -> Account -> Bool anyAccounts p a | p a = True | otherwise = any (anyAccounts p) $ asubs a -- | Add subaccount-inclusive balances to an account tree. sumAccounts :: Account -> Account sumAccounts a | null $ asubs a = a{aibalance=aebalance a} | otherwise = a{aibalance=ibal, asubs=subs} where subs = map sumAccounts $ asubs a ibal = sum $ aebalance a : map aibalance subs -- | Remove all subaccounts below a certain depth. clipAccounts :: Int -> Account -> Account clipAccounts 0 a = a{asubs=[]} clipAccounts d a = a{asubs=subs} where subs = map (clipAccounts (d-1)) $ asubs a -- | Remove subaccounts below the specified depth, aggregating their balance at the depth limit -- (accounts at the depth limit will have any sub-balances merged into their exclusive balance). clipAccountsAndAggregate :: Int -> [Account] -> [Account] clipAccountsAndAggregate d as = combined where clipped = [a{aname=clipOrEllipsifyAccountName d $ aname a} | a <- as] combined = [a{aebalance=sum (map aebalance same)} | same@(a:_) <- groupBy (\a1 a2 -> aname a1 == aname a2) clipped] {- test cases, assuming d=1: assets:cash 1 1 assets:checking 1 1 -> as: [assets:cash 1 1, assets:checking 1 1] clipped: [assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:cash 1 1 assets:checking 1 1 -> as: [assets 0 2, assets:cash 1 1, assets:checking 1 1] clipped: [assets 0 2, assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:bank 1 2 assets:bank:checking 1 1 -> as: [assets 0 2, assets:bank 1 2, assets:bank:checking 1 1] clipped: [assets 0 2, assets 1 2, assets 1 1] combined: [assets 2 2] -} -- | Remove all leaf accounts and subtrees matching a predicate. pruneAccounts :: (Account -> Bool) -> Account -> Maybe Account pruneAccounts p = headMay . 
prune where prune a | null prunedsubs = if p a then [] else [a'] | otherwise = [a'] where prunedsubs = concatMap prune $ asubs a a' = a{asubs=prunedsubs} -- | Flatten an account tree into a list, which is sometimes -- convenient. Note since accounts link to their parents/subs, the -- tree's structure remains intact and can still be used. It's a tree/list! flattenAccounts :: Account -> [Account] flattenAccounts a = squish a [] where squish a as = a : Prelude.foldr squish as (asubs a) -- | Filter an account tree (to a list). filterAccounts :: (Account -> Bool) -> Account -> [Account] filterAccounts p a | p a = a : concatMap (filterAccounts p) (asubs a) | otherwise = concatMap (filterAccounts p) (asubs a) -- | Search an account list by name. lookupAccount :: AccountName -> [Account] -> Maybe Account lookupAccount a = find ((==a).aname) -- debug helpers printAccounts :: Account -> IO () printAccounts = putStrLn . showAccounts showAccounts = unlines . map showAccountDebug . flattenAccounts showAccountsBoringFlag = unlines . map (show . aboring) . flattenAccounts showAccountDebug a = printf "%-25s %4s %4s %s" (aname a) (showMixedAmount $ aebalance a) (showMixedAmount $ aibalance a) (if aboring a then "b" else " " :: String) tests_Hledger_Data_Account = TestList [ ] hledger-lib-1.2/Hledger/Data/AccountName.hs0000644000000000000000000001774413066173044016715 0ustar0000000000000000{-# LANGUAGE NoMonomorphismRestriction, OverloadedStrings #-} {-| 'AccountName's are strings like @assets:cash:petty@, with multiple components separated by ':'. From a set of these we derive the account hierarchy. -} module Hledger.Data.AccountName where import Data.List import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Tree import Test.HUnit import Text.Printf import Hledger.Data.Types import Hledger.Utils acctsepchar :: Char acctsepchar = ':' acctsep :: Text acctsep = T.pack [acctsepchar] -- accountNameComponents :: AccountName -> [String] -- accountNameComponents = splitAtElement acctsepchar accountNameComponents :: AccountName -> [Text] accountNameComponents = T.splitOn acctsep accountNameFromComponents :: [Text] -> AccountName accountNameFromComponents = T.intercalate acctsep accountLeafName :: AccountName -> Text accountLeafName = last . accountNameComponents -- | Truncate all account name components but the last to two characters. accountSummarisedName :: AccountName -> Text accountSummarisedName a -- length cs > 1 = take 2 (head cs) ++ ":" ++ a' | length cs > 1 = (T.intercalate ":" (map (T.take 2) $ init cs)) <> ":" <> a' | otherwise = a' where cs = accountNameComponents a a' = accountLeafName a accountNameLevel :: AccountName -> Int accountNameLevel "" = 0 accountNameLevel a = T.length (T.filter (==acctsepchar) a) + 1 accountNameDrop :: Int -> AccountName -> AccountName accountNameDrop n = accountNameFromComponents . drop n . accountNameComponents -- | ["a:b:c","d:e"] -> ["a","a:b","a:b:c","d","d:e"] expandAccountNames :: [AccountName] -> [AccountName] expandAccountNames as = nub $ concatMap expandAccountName as -- | "a:b:c" -> ["a","a:b","a:b:c"] expandAccountName :: AccountName -> [AccountName] expandAccountName = map accountNameFromComponents . tail . inits . accountNameComponents -- | ["a:b:c","d:e"] -> ["a","d"] topAccountNames :: [AccountName] -> [AccountName] topAccountNames as = [a | a <- expandAccountNames as, accountNameLevel a == 1] parentAccountName :: AccountName -> AccountName parentAccountName = accountNameFromComponents . init . 
accountNameComponents parentAccountNames :: AccountName -> [AccountName] parentAccountNames a = parentAccountNames' $ parentAccountName a where parentAccountNames' "" = [] parentAccountNames' a = a : parentAccountNames' (parentAccountName a) -- | Is the first account a parent or other ancestor of (and not the same as) the second ? isAccountNamePrefixOf :: AccountName -> AccountName -> Bool isAccountNamePrefixOf = T.isPrefixOf . (<> acctsep) isSubAccountNameOf :: AccountName -> AccountName -> Bool s `isSubAccountNameOf` p = (p `isAccountNamePrefixOf` s) && (accountNameLevel s == (accountNameLevel p + 1)) -- | From a list of account names, select those which are direct -- subaccounts of the given account name. subAccountNamesFrom :: [AccountName] -> AccountName -> [AccountName] subAccountNamesFrom accts a = filter (`isSubAccountNameOf` a) accts -- | Convert a list of account names to a tree. accountNameTreeFrom :: [AccountName] -> Tree AccountName accountNameTreeFrom accts = Node "root" (accounttreesfrom (topAccountNames accts)) where accounttreesfrom :: [AccountName] -> [Tree AccountName] accounttreesfrom [] = [] accounttreesfrom as = [Node a (accounttreesfrom $ subs a) | a <- as] subs = subAccountNamesFrom (expandAccountNames accts) nullaccountnametree = Node "root" [] -- | Elide an account name to fit in the specified width. -- From the ledger 2.6 news: -- -- @ -- What Ledger now does is that if an account name is too long, it will -- start abbreviating the first parts of the account name down to two -- letters in length. If this results in a string that is still too -- long, the front will be elided -- not the end. For example: -- -- Expenses:Cash ; OK, not too long -- Ex:Wednesday:Cash ; "Expenses" was abbreviated to fit -- Ex:We:Afternoon:Cash ; "Expenses" and "Wednesday" abbreviated -- ; Expenses:Wednesday:Afternoon:Lunch:Snack:Candy:Chocolate:Cash -- ..:Af:Lu:Sn:Ca:Ch:Cash ; Abbreviated and elided! -- @ elideAccountName :: Int -> AccountName -> AccountName elideAccountName width s -- XXX special case for transactions register's multi-account pseudo-names | " (split)" `T.isSuffixOf` s = let names = T.splitOn ", " $ T.take (T.length s - 8) s widthpername = (max 0 (width - 8 - 2 * (max 1 (length names) - 1))) `div` length names in fitText Nothing (Just width) True False $ (<>" (split)") $ T.intercalate ", " $ [accountNameFromComponents $ elideparts widthpername [] $ accountNameComponents s' | s' <- names] | otherwise = fitText Nothing (Just width) True False $ accountNameFromComponents $ elideparts width [] $ accountNameComponents s where elideparts :: Int -> [Text] -> [Text] -> [Text] elideparts width done ss | textWidth (accountNameFromComponents $ done++ss) <= width = done++ss | length ss > 1 = elideparts width (done++[textTakeWidth 2 $ head ss]) (tail ss) | otherwise = done++ss -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is 0, returns the empty string. clipAccountName :: Int -> AccountName -> AccountName clipAccountName n = accountNameFromComponents . take n . accountNameComponents -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is 0, returns "...". clipOrEllipsifyAccountName :: Int -> AccountName -> AccountName clipOrEllipsifyAccountName 0 = const "..." clipOrEllipsifyAccountName n = accountNameFromComponents . take n . accountNameComponents -- | Escape an AccountName for use within a regular expression. -- >>> putStr $ escapeName "First?!#$*?$(*) !@^#*? 
%)*!@#" -- First\?!#\$\*\?\$\(\*\) !@\^#\*\? %\)\*!@# escapeName :: AccountName -> Regexp escapeName = regexReplaceBy "[[?+|()*\\\\^$]" ("\\" <>) . T.unpack -- | Convert an account name to a regular expression matching it and its subaccounts. accountNameToAccountRegex :: AccountName -> Regexp accountNameToAccountRegex "" = "" accountNameToAccountRegex a = printf "^%s(:|$)" (escapeName a) -- | Convert an account name to a regular expression matching it but not its subaccounts. accountNameToAccountOnlyRegex :: AccountName -> Regexp accountNameToAccountOnlyRegex "" = "" accountNameToAccountOnlyRegex a = printf "^%s$" $ escapeName a -- XXX pack -- | Convert an exact account-matching regular expression to a plain account name. accountRegexToAccountName :: Regexp -> AccountName accountRegexToAccountName = T.pack . regexReplace "^\\^(.*?)\\(:\\|\\$\\)$" "\\1" -- XXX pack -- | Does this string look like an exact account-matching regular expression ? isAccountRegex :: String -> Bool isAccountRegex s = take 1 s == "^" && (take 5 $ reverse s) == ")$|:(" tests_Hledger_Data_AccountName = TestList [ "accountNameTreeFrom" ~: do accountNameTreeFrom ["a"] `is` Node "root" [Node "a" []] accountNameTreeFrom ["a","b"] `is` Node "root" [Node "a" [], Node "b" []] accountNameTreeFrom ["a","a:b"] `is` Node "root" [Node "a" [Node "a:b" []]] accountNameTreeFrom ["a:b:c"] `is` Node "root" [Node "a" [Node "a:b" [Node "a:b:c" []]]] ,"expandAccountNames" ~: expandAccountNames ["assets:cash","assets:checking","expenses:vacation"] `is` ["assets","assets:cash","assets:checking","expenses","expenses:vacation"] ,"isAccountNamePrefixOf" ~: do "assets" `isAccountNamePrefixOf` "assets" `is` False "assets" `isAccountNamePrefixOf` "assets:bank" `is` True "assets" `isAccountNamePrefixOf` "assets:bank:checking" `is` True "my assets" `isAccountNamePrefixOf` "assets:bank" `is` False ,"isSubAccountNameOf" ~: do "assets" `isSubAccountNameOf` "assets" `is` False "assets:bank" `isSubAccountNameOf` "assets" `is` True "assets:bank:checking" `isSubAccountNameOf` "assets" `is` False "assets:bank" `isSubAccountNameOf` "my assets" `is` False ] hledger-lib-1.2/Hledger/Data/Amount.hs0000644000000000000000000006336213042200120015734 0ustar0000000000000000{-| A simple 'Amount' is some quantity of money, shares, or anything else. It has a (possibly null) 'CommoditySymbol' and a numeric quantity: @ $1 £-50 EUR 3.44 GOOG 500 1.5h 90 apples 0 @ It may also have an assigned 'Price', representing this amount's per-unit or total cost in a different commodity. If present, this is rendered like so: @ EUR 2 \@ $1.50 (unit price) EUR 2 \@\@ $3 (total price) @ A 'MixedAmount' is zero or more simple amounts, so can represent multiple commodities; this is the type most often used: @ 0 $50 + EUR 3 16h + $13.55 + AAPL 500 + 6 oranges @ When a mixed amount has been \"normalised\", it has no more than one amount in each commodity and no zero amounts; or it has just a single zero amount and no others. Limited arithmetic with simple and mixed amounts is supported, best used with similar amounts since it mostly ignores assigned prices and commodity exchange rates. 
-} {-# LANGUAGE CPP, StandaloneDeriving, RecordWildCards, OverloadedStrings #-} module Hledger.Data.Amount ( -- * Amount amount, nullamt, missingamt, num, usd, eur, gbp, hrs, at, (@@), amountWithCommodity, -- ** arithmetic costOfAmount, divideAmount, -- ** rendering amountstyle, showAmount, showAmountWithZeroCommodity, showAmountDebug, showAmountWithoutPrice, maxprecision, maxprecisionwithpoint, setAmountPrecision, withPrecision, canonicaliseAmount, -- * MixedAmount nullmixedamt, missingmixedamt, mixed, amounts, filterMixedAmount, filterMixedAmountByCommodity, normaliseMixedAmountSquashPricesForDisplay, normaliseMixedAmount, -- ** arithmetic costOfMixedAmount, divideMixedAmount, averageMixedAmounts, isNegativeAmount, isNegativeMixedAmount, isZeroAmount, isReallyZeroAmount, isZeroMixedAmount, isReallyZeroMixedAmount, isReallyZeroMixedAmountCost, -- ** rendering showMixedAmount, showMixedAmountOneLine, showMixedAmountDebug, showMixedAmountWithoutPrice, showMixedAmountOneLineWithoutPrice, showMixedAmountWithZeroCommodity, showMixedAmountWithPrecision, setMixedAmountPrecision, canonicaliseMixedAmount, -- * misc. ltraceamount, tests_Hledger_Data_Amount ) where import Data.Char (isDigit) import Data.Decimal (roundTo) import Data.Function (on) import Data.List import Data.Map (findWithDefault) import Data.Maybe -- import Data.Text (Text) import qualified Data.Text as T import Test.HUnit import Text.Printf import qualified Data.Map as M import Hledger.Data.Types import Hledger.Data.Commodity import Hledger.Utils deriving instance Show MarketPrice amountstyle = AmountStyle L False 0 (Just '.') Nothing ------------------------------------------------------------------------------- -- Amount instance Show Amount where show _a@Amount{..} -- debugLevel < 2 = showAmountWithoutPrice a -- debugLevel < 3 = showAmount a | debugLevel < 6 = printf "Amount {acommodity=%s, aquantity=%s, ..}" (show acommodity) (show aquantity) | otherwise = --showAmountDebug a printf "Amount {acommodity=%s, aquantity=%s, aprice=%s, astyle=%s}" (show acommodity) (show aquantity) (showPriceDebug aprice) (show astyle) instance Num Amount where abs a@Amount{aquantity=q} = a{aquantity=abs q} signum a@Amount{aquantity=q} = a{aquantity=signum q} fromInteger i = nullamt{aquantity=fromInteger i} negate a@Amount{aquantity=q} = a{aquantity=(-q)} (+) = similarAmountsOp (+) (-) = similarAmountsOp (-) (*) = similarAmountsOp (*) -- | The empty simple amount. amount, nullamt :: Amount amount = Amount{acommodity="", aquantity=0, aprice=NoPrice, astyle=amountstyle} nullamt = amount -- | A temporary value for parsed transactions which had no amount specified. missingamt :: Amount missingamt = amount{acommodity="AUTO"} -- Handy amount constructors for tests. -- usd/eur/gbp round their argument to a whole number of pennies/cents. num n = amount{acommodity="", aquantity=n} hrs n = amount{acommodity="h", aquantity=n, astyle=amountstyle{asprecision=2, ascommodityside=R}} usd n = amount{acommodity="$", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} eur n = amount{acommodity="€", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} gbp n = amount{acommodity="£", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} amt `at` priceamt = amt{aprice=UnitPrice priceamt} amt @@ priceamt = amt{aprice=TotalPrice priceamt} -- | Apply a binary arithmetic operator to two amounts, which should -- be in the same commodity if non-zero (warning, this is not checked). -- A zero result keeps the commodity of the second amount. 
-- The result's display style is that of the second amount, with -- precision set to the highest of either amount. -- Prices are ignored and discarded. -- Remember: the caller is responsible for ensuring both amounts have the same commodity. similarAmountsOp :: (Quantity -> Quantity -> Quantity) -> Amount -> Amount -> Amount similarAmountsOp op Amount{acommodity=_, aquantity=q1, astyle=AmountStyle{asprecision=p1}} Amount{acommodity=c2, aquantity=q2, astyle=s2@AmountStyle{asprecision=p2}} = -- trace ("a1:"++showAmountDebug a1) $ trace ("a2:"++showAmountDebug a2) $ traceWith (("= :"++).showAmountDebug) amount{acommodity=c2, aquantity=q1 `op` q2, astyle=s2{asprecision=max p1 p2}} -- c1==c2 || q1==0 || q2==0 = -- otherwise = error "tried to do simple arithmetic with amounts in different commodities" -- | Convert an amount to the specified commodity, ignoring and discarding -- any assigned prices and assuming an exchange rate of 1. amountWithCommodity :: CommoditySymbol -> Amount -> Amount amountWithCommodity c a = a{acommodity=c, aprice=NoPrice} -- | Convert an amount to the commodity of its assigned price, if any. Notes: -- -- - price amounts must be MixedAmounts with exactly one component Amount (or there will be a runtime error) XXX -- -- - price amounts should be positive, though this is not currently enforced costOfAmount :: Amount -> Amount costOfAmount a@Amount{aquantity=q, aprice=price} = case price of NoPrice -> a UnitPrice p@Amount{aquantity=pq} -> p{aquantity=pq * q} TotalPrice p@Amount{aquantity=pq} -> p{aquantity=pq * signum q} -- | Divide an amount's quantity by a constant. divideAmount :: Amount -> Quantity -> Amount divideAmount a@Amount{aquantity=q} d = a{aquantity=q/d} -- | Is this amount negative ? The price is ignored. isNegativeAmount :: Amount -> Bool isNegativeAmount Amount{aquantity=q} = q < 0 digits = "123456789" :: String -- | Does this amount appear to be zero when displayed with its given precision ? isZeroAmount :: Amount -> Bool isZeroAmount a -- a==missingamt = False | otherwise = (null . filter (`elem` digits) . showAmountWithoutPriceOrCommodity) a -- | Is this amount "really" zero, regardless of the display precision ? isReallyZeroAmount :: Amount -> Bool isReallyZeroAmount Amount{aquantity=q} = q == 0 -- | Get the string representation of an amount, based on its commodity's -- display settings except using the specified precision. showAmountWithPrecision :: Int -> Amount -> String showAmountWithPrecision p = showAmount . setAmountPrecision p -- | Set an amount's display precision. setAmountPrecision :: Int -> Amount -> Amount setAmountPrecision p a@Amount{astyle=s} = a{astyle=s{asprecision=p}} -- | Set an amount's display precision, flipped. withPrecision :: Amount -> Int -> Amount withPrecision = flip setAmountPrecision -- | Get a string representation of an amount for debugging, -- appropriate to the current debug level. 9 shows maximum detail. showAmountDebug :: Amount -> String showAmountDebug Amount{acommodity="AUTO"} = "(missing)" showAmountDebug Amount{..} = printf "Amount {acommodity=%s, aquantity=%s, aprice=%s, astyle=%s}" (show acommodity) (show aquantity) (showPriceDebug aprice) (show astyle) -- | Get the string representation of an amount, without any \@ price. showAmountWithoutPrice :: Amount -> String showAmountWithoutPrice a = showAmount a{aprice=NoPrice} -- | Get the string representation of an amount, without any price or commodity symbol. 
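--
-- A quick illustrative sketch (not a doctest from this file):
--
-- > showAmountWithoutPriceOrCommodity (usd 1 `at` eur 2)   -- expected: "1.00"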
showAmountWithoutPriceOrCommodity :: Amount -> String showAmountWithoutPriceOrCommodity a = showAmount a{acommodity="", aprice=NoPrice} showPrice :: Price -> String showPrice NoPrice = "" showPrice (UnitPrice pa) = " @ " ++ showAmount pa showPrice (TotalPrice pa) = " @@ " ++ showAmount pa showPriceDebug :: Price -> String showPriceDebug NoPrice = "" showPriceDebug (UnitPrice pa) = " @ " ++ showAmountDebug pa showPriceDebug (TotalPrice pa) = " @@ " ++ showAmountDebug pa -- | Get the string representation of an amount, based on its -- commodity's display settings. String representations equivalent to -- zero are converted to just \"0\". The special "missing" amount is -- displayed as the empty string. showAmount :: Amount -> String showAmount = showAmountHelper False showAmountHelper :: Bool -> Amount -> String showAmountHelper _ Amount{acommodity="AUTO"} = "" showAmountHelper showzerocommodity a@(Amount{acommodity=c, aprice=p, astyle=AmountStyle{..}}) = case ascommodityside of L -> printf "%s%s%s%s" (T.unpack c') space quantity' price R -> printf "%s%s%s%s" quantity' space (T.unpack c') price where quantity = showamountquantity a displayingzero = null $ filter (`elem` digits) $ quantity (quantity',c') | displayingzero && not showzerocommodity = ("0","") | otherwise = (quantity, quoteCommoditySymbolIfNeeded c) space = if (not (T.null c') && ascommodityspaced) then " " else "" :: String price = showPrice p -- | Like showAmount, but show a zero amount's commodity if it has one. showAmountWithZeroCommodity :: Amount -> String showAmountWithZeroCommodity = showAmountHelper True -- | Get the string representation of the number part of of an amount, -- using the display settings from its commodity. showamountquantity :: Amount -> String showamountquantity Amount{aquantity=q, astyle=AmountStyle{asprecision=p, asdecimalpoint=mdec, asdigitgroups=mgrps}} = punctuatenumber (fromMaybe '.' mdec) mgrps $ qstr where -- isint n = fromIntegral (round n) == n qstr -- p == maxprecision && isint q = printf "%d" (round q::Integer) | p == maxprecisionwithpoint = show q | p == maxprecision = chopdotzero $ show q | otherwise = show $ roundTo (fromIntegral p) q -- | Replace a number string's decimal point with the specified character, -- and add the specified digit group separators. The last digit group will -- be repeated as needed. punctuatenumber :: Char -> Maybe DigitGroupStyle -> String -> String punctuatenumber dec mgrps s = sign ++ reverse (applyDigitGroupStyle mgrps (reverse int)) ++ frac'' where (sign,num) = break isDigit s (int,frac) = break (=='.') num frac' = dropWhile (=='.') frac frac'' | null frac' = "" | otherwise = dec:frac' applyDigitGroupStyle :: Maybe DigitGroupStyle -> String -> String applyDigitGroupStyle Nothing s = s applyDigitGroupStyle (Just (DigitGroups c gs)) s = addseps (repeatLast gs) s where addseps [] s = s addseps (g:gs) s | length s <= g = s | otherwise = let (part,rest) = splitAt g s in part ++ [c] ++ addseps gs rest repeatLast [] = [] repeatLast gs = init gs ++ repeat (last gs) chopdotzero str = reverse $ case reverse str of '0':'.':s -> s s -> s -- | For rendering: a special precision value which means show all available digits. maxprecision :: Int maxprecision = 999998 -- | For rendering: a special precision value which forces display of a decimal point. maxprecisionwithpoint :: Int maxprecisionwithpoint = 999999 -- like journalCanonicaliseAmounts -- | Canonicalise an amount's display style using the provided commodity style map. 
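--
-- For instance (an illustrative sketch; in practice the style map comes from
-- the journal's canonical commodity styles):
--
-- > canonicaliseAmount (M.fromList [("$", amountstyle{asprecision=4})]) (usd 1)
--
-- returns the amount with its display style replaced by the mapped one, so it
-- would render as "$1.0000"; commodities missing from the map keep their style.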
canonicaliseAmount :: M.Map CommoditySymbol AmountStyle -> Amount -> Amount canonicaliseAmount styles a@Amount{acommodity=c, astyle=s} = a{astyle=s'} where s' = findWithDefault s c styles ------------------------------------------------------------------------------- -- MixedAmount instance Show MixedAmount where show | debugLevel < 3 = intercalate "\\n" . lines . showMixedAmountWithoutPrice -- debugLevel < 6 = intercalate "\\n" . lines . showMixedAmount | otherwise = showMixedAmountDebug instance Num MixedAmount where fromInteger i = Mixed [fromInteger i] negate (Mixed as) = Mixed $ map negate as (+) (Mixed as) (Mixed bs) = normaliseMixedAmount $ Mixed $ as ++ bs (*) = error' "error, mixed amounts do not support multiplication" abs = error' "error, mixed amounts do not support abs" signum = error' "error, mixed amounts do not support signum" -- | The empty mixed amount. nullmixedamt :: MixedAmount nullmixedamt = Mixed [] -- | A temporary value for parsed transactions which had no amount specified. missingmixedamt :: MixedAmount missingmixedamt = Mixed [missingamt] -- | Convert amounts in various commodities into a normalised MixedAmount. mixed :: [Amount] -> MixedAmount mixed = normaliseMixedAmount . Mixed -- | Simplify a mixed amount's component amounts: -- -- * amounts in the same commodity are combined unless they have different prices or total prices -- -- * multiple zero amounts, all with the same non-null commodity, are replaced by just the last of them, preserving the commodity and amount style (all but the last zero amount are discarded) -- -- * multiple zero amounts with multiple commodities, or no commodities, are replaced by one commodity-less zero amount -- -- * an empty amount list is replaced by one commodity-less zero amount -- -- * the special "missing" mixed amount remains unchanged -- normaliseMixedAmount :: MixedAmount -> MixedAmount normaliseMixedAmount = normaliseHelper False normaliseHelper :: Bool -> MixedAmount -> MixedAmount normaliseHelper squashprices (Mixed as) | missingamt `elem` as = missingmixedamt -- missingamt should always be alone, but detect it even if not | null nonzeros = Mixed [newzero] | otherwise = Mixed nonzeros where newzero = case filter (/= "") (map acommodity zeros) of _:_ -> last zeros _ -> nullamt (zeros, nonzeros) = partition isReallyZeroAmount $ map sumSimilarAmountsUsingFirstPrice $ groupBy groupfn $ sortBy sortfn $ as sortfn | squashprices = compare `on` acommodity | otherwise = compare `on` \a -> (acommodity a, aprice a) groupfn | squashprices = (==) `on` acommodity | otherwise = \a1 a2 -> acommodity a1 == acommodity a2 && combinableprices a1 a2 combinableprices Amount{aprice=NoPrice} Amount{aprice=NoPrice} = True combinableprices Amount{aprice=UnitPrice p1} Amount{aprice=UnitPrice p2} = p1 == p2 combinableprices _ _ = False tests_normaliseMixedAmount = [ "normaliseMixedAmount" ~: do -- assertEqual "missing amount is discarded" (Mixed [nullamt]) (normaliseMixedAmount $ Mixed [usd 0, missingamt]) assertEqual "any missing amount means a missing mixed amount" missingmixedamt (normaliseMixedAmount $ Mixed [usd 0, missingamt]) assertEqual "unpriced same-commodity amounts are combined" (Mixed [usd 2]) (normaliseMixedAmount $ Mixed [usd 0, usd 2]) -- amounts with same unit price are combined normaliseMixedAmount (Mixed [usd 1 `at` eur 1, usd 1 `at` eur 1]) `is` Mixed [usd 2 `at` eur 1] -- amounts with different unit prices are not combined normaliseMixedAmount (Mixed [usd 1 `at` eur 1, usd 1 `at` eur 2]) `is` Mixed [usd 1 `at` eur 1, usd 1 
`at` eur 2] -- amounts with total prices are not combined normaliseMixedAmount (Mixed [usd 1 @@ eur 1, usd 1 @@ eur 1]) `is` Mixed [usd 1 @@ eur 1, usd 1 @@ eur 1] ] -- | Like normaliseMixedAmount, but combine each commodity's amounts -- into just one by throwing away all prices except the first. This is -- only used as a rendering helper, and could show a misleading price. normaliseMixedAmountSquashPricesForDisplay :: MixedAmount -> MixedAmount normaliseMixedAmountSquashPricesForDisplay = normaliseHelper True tests_normaliseMixedAmountSquashPricesForDisplay = [ "normaliseMixedAmountSquashPricesForDisplay" ~: do normaliseMixedAmountSquashPricesForDisplay (Mixed []) `is` Mixed [nullamt] assertBool "" $ isZeroMixedAmount $ normaliseMixedAmountSquashPricesForDisplay (Mixed [usd 10 ,usd 10 @@ eur 7 ,usd (-10) ,usd (-10) @@ eur 7 ]) ] -- | Sum same-commodity amounts in a lossy way, applying the first -- price to the result and discarding any other prices. Only used as a -- rendering helper. sumSimilarAmountsUsingFirstPrice :: [Amount] -> Amount sumSimilarAmountsUsingFirstPrice [] = nullamt sumSimilarAmountsUsingFirstPrice as = (sumStrict as){aprice=aprice $ head as} -- -- | Sum same-commodity amounts. If there were different prices, set -- -- the price to a special marker indicating "various". Only used as a -- -- rendering helper. -- sumSimilarAmountsNotingPriceDifference :: [Amount] -> Amount -- sumSimilarAmountsNotingPriceDifference [] = nullamt -- sumSimilarAmountsNotingPriceDifference as = undefined -- | Get a mixed amount's component amounts. amounts :: MixedAmount -> [Amount] amounts (Mixed as) = as -- | Filter a mixed amount's component amounts by a predicate. filterMixedAmount :: (Amount -> Bool) -> MixedAmount -> MixedAmount filterMixedAmount p (Mixed as) = Mixed $ filter p as -- | Return an unnormalised MixedAmount containing exactly one Amount -- with the specified commodity and the quantity of that commodity -- found in the original. NB if Amount's quantity is zero it will be -- discarded next time the MixedAmount gets normalised. filterMixedAmountByCommodity :: CommoditySymbol -> MixedAmount -> MixedAmount filterMixedAmountByCommodity c (Mixed as) = Mixed as' where as' = case filter ((==c) . acommodity) as of [] -> [nullamt{acommodity=c}] as'' -> [sum as''] -- | Convert a mixed amount's component amounts to the commodity of their -- assigned price, if any. costOfMixedAmount :: MixedAmount -> MixedAmount costOfMixedAmount (Mixed as) = Mixed $ map costOfAmount as -- | Divide a mixed amount's quantities by a constant. divideMixedAmount :: MixedAmount -> Quantity -> MixedAmount divideMixedAmount (Mixed as) d = Mixed $ map (flip divideAmount d) as -- | Calculate the average of some mixed amounts. averageMixedAmounts :: [MixedAmount] -> MixedAmount averageMixedAmounts [] = 0 averageMixedAmounts as = sum as `divideMixedAmount` fromIntegral (length as) -- | Is this mixed amount negative, if it can be normalised to a single commodity ? isNegativeMixedAmount :: MixedAmount -> Maybe Bool isNegativeMixedAmount m = case as of [a] -> Just $ isNegativeAmount a _ -> Nothing where as = amounts $ normaliseMixedAmountSquashPricesForDisplay m -- | Does this mixed amount appear to be zero when displayed with its given precision ? isZeroMixedAmount :: MixedAmount -> Bool isZeroMixedAmount = all isZeroAmount . amounts . normaliseMixedAmountSquashPricesForDisplay -- | Is this mixed amount "really" zero ? See isReallyZeroAmount. 
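--
-- An illustrative contrast with isZeroMixedAmount (not from this file's tests):
-- given
--
-- > a = num 0.001 `withPrecision` 2
--
-- isZeroMixedAmount (Mixed [a]) is True (it displays as zero at precision 2),
-- while isReallyZeroMixedAmount (Mixed [a]) is False (the quantity is nonzero).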
isReallyZeroMixedAmount :: MixedAmount -> Bool isReallyZeroMixedAmount = all isReallyZeroAmount . amounts . normaliseMixedAmountSquashPricesForDisplay -- | Is this mixed amount "really" zero, after converting to cost -- commodities where possible ? isReallyZeroMixedAmountCost :: MixedAmount -> Bool isReallyZeroMixedAmountCost = isReallyZeroMixedAmount . costOfMixedAmount -- -- | MixedAmount derived Eq instance in Types.hs doesn't know that we -- -- want $0 = EUR0 = 0. Yet we don't want to drag all this code over there. -- -- For now, use this when cross-commodity zero equality is important. -- mixedAmountEquals :: MixedAmount -> MixedAmount -> Bool -- mixedAmountEquals a b = amounts a' == amounts b' || (isZeroMixedAmount a' && isZeroMixedAmount b') -- where a' = normaliseMixedAmountSquashPricesForDisplay a -- b' = normaliseMixedAmountSquashPricesForDisplay b -- | Get the string representation of a mixed amount, after -- normalising it to one amount per commodity. Assumes amounts have -- no or similar prices, otherwise this can show misleading prices. showMixedAmount :: MixedAmount -> String showMixedAmount = showMixedAmountHelper False False -- | Like showMixedAmount, but zero amounts are shown with their -- commodity if they have one. showMixedAmountWithZeroCommodity :: MixedAmount -> String showMixedAmountWithZeroCommodity = showMixedAmountHelper True False -- | Get the one-line string representation of a mixed amount. showMixedAmountOneLine :: MixedAmount -> String showMixedAmountOneLine = showMixedAmountHelper False True showMixedAmountHelper :: Bool -> Bool -> MixedAmount -> String showMixedAmountHelper showzerocommodity useoneline m = join $ map showamt $ amounts $ normaliseMixedAmountSquashPricesForDisplay m where join | useoneline = intercalate ", " | otherwise = vConcatRightAligned showamt | showzerocommodity = showAmountWithZeroCommodity | otherwise = showAmount -- | Compact labelled trace of a mixed amount, for debugging. ltraceamount :: String -> MixedAmount -> MixedAmount ltraceamount s = traceWith (((s ++ ": ") ++).showMixedAmount) -- | Set the display precision in the amount's commodities. setMixedAmountPrecision :: Int -> MixedAmount -> MixedAmount setMixedAmountPrecision p (Mixed as) = Mixed $ map (setAmountPrecision p) as -- | Get the string representation of a mixed amount, showing each of its -- component amounts with the specified precision, ignoring their -- commoditys' display precision settings. showMixedAmountWithPrecision :: Int -> MixedAmount -> String showMixedAmountWithPrecision p m = vConcatRightAligned $ map (showAmountWithPrecision p) $ amounts $ normaliseMixedAmountSquashPricesForDisplay m -- | Get an unambiguous string representation of a mixed amount for debugging. showMixedAmountDebug :: MixedAmount -> String showMixedAmountDebug m | m == missingmixedamt = "(missing)" | otherwise = printf "Mixed [%s]" as where as = intercalate "\n " $ map showAmountDebug $ amounts m -- | Get the string representation of a mixed amount, but without -- any \@ prices. showMixedAmountWithoutPrice :: MixedAmount -> String showMixedAmountWithoutPrice m = concat $ intersperse "\n" $ map showfixedwidth as where (Mixed as) = normaliseMixedAmountSquashPricesForDisplay $ stripPrices m stripPrices (Mixed as) = Mixed $ map stripprice as where stripprice a = a{aprice=NoPrice} width = maximum $ map (length . showAmount) as showfixedwidth = printf (printf "%%%ds" width) . showAmountWithoutPrice -- | Get the one-line string representation of a mixed amount, but without -- any \@ prices. 
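--
-- For example (an illustrative sketch):
--
-- > showMixedAmountOneLineWithoutPrice (Mixed [usd 1 `at` eur 2, eur 3])
--
-- is expected to produce "$1.00, €3.00".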
showMixedAmountOneLineWithoutPrice :: MixedAmount -> String showMixedAmountOneLineWithoutPrice m = concat $ intersperse ", " $ map showAmountWithoutPrice as where (Mixed as) = normaliseMixedAmountSquashPricesForDisplay $ stripPrices m stripPrices (Mixed as) = Mixed $ map stripprice as where stripprice a = a{aprice=NoPrice} -- | Canonicalise a mixed amount's display styles using the provided commodity style map. canonicaliseMixedAmount :: M.Map CommoditySymbol AmountStyle -> MixedAmount -> MixedAmount canonicaliseMixedAmount styles (Mixed as) = Mixed $ map (canonicaliseAmount styles) as ------------------------------------------------------------------------------- -- misc tests_Hledger_Data_Amount = TestList $ tests_normaliseMixedAmount ++ tests_normaliseMixedAmountSquashPricesForDisplay ++ [ -- Amount "costOfAmount" ~: do costOfAmount (eur 1) `is` eur 1 costOfAmount (eur 2){aprice=UnitPrice $ usd 2} `is` usd 4 costOfAmount (eur 1){aprice=TotalPrice $ usd 2} `is` usd 2 costOfAmount (eur (-1)){aprice=TotalPrice $ usd 2} `is` usd (-2) ,"isZeroAmount" ~: do assertBool "" $ isZeroAmount $ amount assertBool "" $ isZeroAmount $ usd 0 ,"negating amounts" ~: do let a = usd 1 negate a `is` a{aquantity=(-1)} let b = (usd 1){aprice=UnitPrice $ eur 2} negate b `is` b{aquantity=(-1)} ,"adding amounts without prices" ~: do let a1 = usd 1.23 let a2 = usd (-1.23) let a3 = usd (-1.23) (a1 + a2) `is` usd 0 (a1 + a3) `is` usd 0 (a2 + a3) `is` usd (-2.46) (a3 + a3) `is` usd (-2.46) sum [a1,a2,a3,-a3] `is` usd 0 -- highest precision is preserved let ap1 = usd 1 `withPrecision` 1 ap3 = usd 1 `withPrecision` 3 (asprecision $ astyle $ sum [ap1,ap3]) `is` 3 (asprecision $ astyle $ sum [ap3,ap1]) `is` 3 -- adding different commodities assumes conversion rate 1 assertBool "" $ isZeroAmount (a1 - eur 1.23) ,"showAmount" ~: do showAmount (usd 0 + gbp 0) `is` "0" -- MixedAmount ,"adding mixed amounts to zero, the commodity and amount style are preserved" ~: do (sum $ map (Mixed . (:[])) [usd 1.25 ,usd (-1) `withPrecision` 3 ,usd (-0.25) ]) `is` Mixed [usd 0 `withPrecision` 3] ,"adding mixed amounts with total prices" ~: do (sum $ map (Mixed . (:[])) [usd 1 @@ eur 1 ,usd (-2) @@ eur 1 ]) `is` (Mixed [usd 1 @@ eur 1 ,usd (-2) @@ eur 1 ]) ,"showMixedAmount" ~: do showMixedAmount (Mixed [usd 1]) `is` "$1.00" showMixedAmount (Mixed [usd 1 `at` eur 2]) `is` "$1.00 @ €2.00" showMixedAmount (Mixed [usd 0]) `is` "0" showMixedAmount (Mixed []) `is` "0" showMixedAmount missingmixedamt `is` "" ,"showMixedAmountWithoutPrice" ~: do let a = usd 1 `at` eur 2 showMixedAmountWithoutPrice (Mixed [a]) `is` "$1.00" showMixedAmountWithoutPrice (Mixed [a, (-a)]) `is` "0" ] hledger-lib-1.2/Hledger/Data/AutoTransaction.hs0000644000000000000000000001325013066173044017622 0ustar0000000000000000{-# LANGUAGE OverloadedStrings, ViewPatterns #-} {-| This module provides utilities for applying automated transactions like 'ModifierTransaction' and 'PeriodicTransaction'. 
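As an illustrative sketch only (the journal syntax shown here is the one
described in the hledger/ledger manuals, not something defined in this
module), such entries look roughly like:

@
= expenses:food
    budget:food     *-1

~ monthly from 2017/1
    income:salary   $-100
    assets:bank      $100
@

'runModifierTransaction' applies entries of the first kind to matching
transactions, appending the generated postings, while 'runPeriodicTransaction'
expands entries of the second kind into concrete transactions over a date span.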
-} module Hledger.Data.AutoTransaction ( -- * Transaction processors runModifierTransaction , runPeriodicTransaction -- * Accessors , mtvaluequery , jdatespan ) where import Data.Maybe import Data.Monoid ((<>)) import Data.Time.Calendar import qualified Data.Text as T import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Transaction import Hledger.Utils.Parse import Hledger.Utils.UTF8IOCompat (error') import Hledger.Query -- $setup -- >>> :set -XOverloadedStrings -- >>> import Hledger.Data.Posting -- >>> import Hledger.Data.Journal -- | Builds a 'Transaction' transformer based on 'ModifierTransaction'. -- -- 'Query' parameter allows injection of additional restriction on posting -- match. Don't forget to call 'txnTieKnot'. -- -- >>> runModifierTransaction Any (ModifierTransaction "" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- pong $2.00 -- -- -- >>> runModifierTransaction Any (ModifierTransaction "miss" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- -- -- >>> runModifierTransaction None (ModifierTransaction "" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- -- -- >>> runModifierTransaction Any (ModifierTransaction "ping" ["pong" `post` amount{acommodity="*", aquantity=3}]) nulltransaction{tpostings=["ping" `post` usd 2]} -- 0000/01/01 -- ping $2.00 -- pong $6.00 -- -- runModifierTransaction :: Query -> ModifierTransaction -> (Transaction -> Transaction) runModifierTransaction q mt = modifier where q' = simplifyQuery $ And [q, mtvaluequery mt (error "query cannot depend on current time")] mods = map runModifierPosting $ mtpostings mt generatePostings ps = [m p | p <- ps, q' `matchesPosting` p, m <- mods] modifier t@(tpostings -> ps) = t { tpostings = ps ++ generatePostings ps } -- | Extract 'Query' equivalent of 'mtvalueexpr' from 'ModifierTransaction' -- -- >>> mtvaluequery (ModifierTransaction "" []) undefined -- Any -- >>> mtvaluequery (ModifierTransaction "ping" []) undefined -- Acct "ping" -- >>> mtvaluequery (ModifierTransaction "date:2016" []) undefined -- Date (DateSpan 2016) -- >>> mtvaluequery (ModifierTransaction "date:today" []) (read "2017-01-01") -- Date (DateSpan 2017/01/01) mtvaluequery :: ModifierTransaction -> (Day -> Query) mtvaluequery mt = fst . 
flip parseQuery (mtvalueexpr mt) -- | 'DateSpan' of all dates mentioned in 'Journal' -- -- >>> jdatespan nulljournal -- DateSpan - -- >>> jdatespan nulljournal{jtxns=[nulltransaction{tdate=read "2016-01-01"}] } -- DateSpan 2016/01/01 -- >>> jdatespan nulljournal{jtxns=[nulltransaction{tdate=read "2016-01-01", tpostings=[nullposting{pdate=Just $ read "2016-02-01"}]}] } -- DateSpan 2016/01/01-2016/02/01 jdatespan :: Journal -> DateSpan jdatespan j | null dates = nulldatespan | otherwise = DateSpan (Just $ minimum dates) (Just $ 1 `addDays` maximum dates) where dates = concatMap tdates $ jtxns j -- | 'DateSpan' of all dates mentioned in 'Transaction' -- -- >>> tdates nulltransaction -- [0000-01-01] tdates :: Transaction -> [Day] tdates t = tdate t : concatMap pdates (tpostings t) ++ maybeToList (tdate2 t) where pdates p = catMaybes [pdate p, pdate2 p] postingScale :: Posting -> Maybe Quantity postingScale p = case amounts $ pamount p of [a] | acommodity a == "*" -> Just $ aquantity a _ -> Nothing runModifierPosting :: Posting -> (Posting -> Posting) runModifierPosting p' = modifier where modifier p = renderPostingCommentDates $ p' { pdate = pdate p , pdate2 = pdate2 p , pamount = amount' p } amount' = case postingScale p' of Nothing -> const $ pamount p' Just n -> \p -> pamount p `divideMixedAmount` (1/n) renderPostingCommentDates :: Posting -> Posting renderPostingCommentDates p = p { pcomment = comment' } where datesComment = T.concat $ catMaybes [T.pack . showDate <$> pdate p, ("=" <>) . T.pack . showDate <$> pdate2 p] comment' | T.null datesComment = pcomment p | otherwise = T.intercalate "\n" $ filter (not . T.null) [T.strip $ pcomment p, "[" <> datesComment <> "]"] -- | Generate transactions from 'PeriodicTransaction' within a 'DateSpan' -- -- Note that new transactions require 'txnTieKnot' post-processing. -- -- >>> mapM_ (putStr . show) $ runPeriodicTransaction (PeriodicTransaction "monthly from 2017/1 to 2017/4" ["hi" `post` usd 1]) nulldatespan -- 2017/01/01 -- hi $1.00 -- -- 2017/02/01 -- hi $1.00 -- -- 2017/03/01 -- hi $1.00 -- runPeriodicTransaction :: PeriodicTransaction -> (DateSpan -> [Transaction]) runPeriodicTransaction pt = generate where base = nulltransaction { tpostings = ptpostings pt } periodExpr = ptperiodicexpr pt errCurrent = error' $ "Current date cannot be referenced in " ++ show (T.unpack periodExpr) (interval, effectspan) = case parsePeriodExpr errCurrent periodExpr of Left e -> error' $ "Failed to parse " ++ show (T.unpack periodExpr) ++ ": " ++ showDateParseError e Right x -> x generate jspan = [base {tdate=date} | span <- interval `splitSpan` spanIntersect effectspan jspan, let Just date = spanStart span] hledger-lib-1.2/Hledger/Data/Commodity.hs0000644000000000000000000000440013035210046016433 0ustar0000000000000000{-| A 'Commodity' is a symbol representing a currency or some other kind of thing we are tracking, and some display preferences that tell how to display 'Amount's of the commodity - is the symbol on the left or right, are thousands separated by comma, significant decimal places and so on. 
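One small behaviour worth noting (illustrative, see quoteCommoditySymbolIfNeeded
below): symbols containing digits, spaces or other special characters are
wrapped in double quotes when rendered, e.g. a symbol like USD 2009 would be
quoted, while a plain symbol such as $ is returned unchanged.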
-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Commodity where import Data.List import Data.Maybe (fromMaybe) import Data.Monoid -- import Data.Text (Text) import qualified Data.Text as T import Test.HUnit -- import qualified Data.Map as M import Hledger.Data.Types import Hledger.Utils -- characters that may not be used in a non-quoted commodity symbol nonsimplecommoditychars = "0123456789-+.@;\n \"{}=" :: [Char] quoteCommoditySymbolIfNeeded s | any (`elem` nonsimplecommoditychars) (T.unpack s) = "\"" <> s <> "\"" | otherwise = s commodity = "" -- handy constructors for tests -- unknown = commodity -- usd = "$" -- eur = "€" -- gbp = "£" -- hour = "h" -- Some sample commodity' names and symbols, for use in tests.. commoditysymbols = [("unknown","") ,("usd","$") ,("eur","€") ,("gbp","£") ,("hour","h") ] -- | Look up one of the sample commodities' symbol by name. comm :: String -> CommoditySymbol comm name = snd $ fromMaybe (error' "commodity lookup failed") (find (\n -> fst n == name) commoditysymbols) -- | Find the conversion rate between two commodities. Currently returns 1. conversionRate :: CommoditySymbol -> CommoditySymbol -> Double conversionRate _ _ = 1 -- -- | Convert a list of commodities to a map from commodity symbols to -- -- unique, display-preference-canonicalised commodities. -- canonicaliseCommodities :: [CommoditySymbol] -> Map.Map String CommoditySymbol -- canonicaliseCommodities cs = -- Map.fromList [(s,firstc{precision=maxp}) | s <- symbols, -- let cs = commoditymap ! s, -- let firstc = head cs, -- let maxp = maximum $ map precision cs -- ] -- where -- commoditymap = Map.fromList [(s, commoditieswithsymbol s) | s <- symbols] -- commoditieswithsymbol s = filter ((s==) . symbol) cs -- symbols = nub $ map symbol cs tests_Hledger_Data_Commodity = TestList [ ] hledger-lib-1.2/Hledger/Data/Dates.hs0000644000000000000000000007042613035510426015547 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE NoMonoLocalBinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-| Date parsing and utilities for hledger. For date and time values, we use the standard Day and UTCTime types. A 'SmartDate' is a date which may be partially-specified or relative. Eg 2008\/12\/31, but also 2008\/12, 12\/31, tomorrow, last week, next year. We represent these as a triple of strings like (\"2008\",\"12\",\"\"), (\"\",\"\",\"tomorrow\"), (\"\",\"last\",\"week\"). A 'DateSpan' is the span of time between two specific calendar dates, or an open-ended span where one or both dates are unspecified. (A date span with both ends unspecified matches all dates.) An 'Interval' is ledger's \"reporting interval\" - weekly, monthly, quarterly, etc. 'Period' will probably replace DateSpan in due course. -} -- XXX fromGregorian silently clips bad dates, use fromGregorianValid instead ? 
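-- An informal sketch of two rendering helpers defined below (expected outputs
-- inferred from this module's own doctests; illustrative only):
--
-- > showDate (parsedate "2008/02/03")                    -- "2008/02/03"
-- > showDateSpan (mkdatespan "2008/01/01" "2009/01/01")  -- "2008"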
module Hledger.Data.Dates ( -- * Misc date handling utilities getCurrentDay, getCurrentMonth, getCurrentYear, nulldate, spanContainsDate, periodContainsDate, parsedateM, parsedate, showDate, showDateSpan, elapsedSeconds, prevday, parsePeriodExpr, nulldatespan, failIfInvalidYear, failIfInvalidMonth, failIfInvalidDay, datesepchar, datesepchars, spanStart, spanEnd, spansSpan, spanIntersect, spansIntersect, spanDefaultsFrom, spanUnion, spansUnion, smartdate, splitSpan, fixSmartDate, fixSmartDateStr, fixSmartDateStrEither, fixSmartDateStrEither', daysInSpan, maybePeriod, mkdatespan, ) where import Prelude () import Prelude.Compat import Control.Monad import Data.List.Compat import Data.Maybe import Data.Text (Text) import qualified Data.Text as T #if MIN_VERSION_time(1,5,0) import Data.Time.Format hiding (months) #else import Data.Time.Format import System.Locale (TimeLocale, defaultTimeLocale) #endif import Data.Time.Calendar import Data.Time.Calendar.OrdinalDate import Data.Time.Clock import Data.Time.LocalTime import Safe (headMay, lastMay, readMay) import Text.Megaparsec import Text.Megaparsec.Text import Text.Printf import Hledger.Data.Types import Hledger.Data.Period import Hledger.Utils -- Help ppShow parse and line-wrap DateSpans better in debug output. instance Show DateSpan where show s = "DateSpan " ++ showDateSpan s -- show s = "DateSpan \"" ++ showDateSpan s ++ "\"" -- quotes to help pretty-show showDate :: Day -> String showDate = formatTime defaultTimeLocale "%0C%y/%m/%d" -- | Render a datespan as a display string, abbreviating into a -- compact form if possible. showDateSpan :: DateSpan -> String showDateSpan = showPeriod . dateSpanAsPeriod -- | Get the current local date. getCurrentDay :: IO Day getCurrentDay = do t <- getZonedTime return $ localDay (zonedTimeToLocalTime t) -- | Get the current local month number. getCurrentMonth :: IO Int getCurrentMonth = do (_,m,_) <- toGregorian `fmap` getCurrentDay return m -- | Get the current local year. getCurrentYear :: IO Integer getCurrentYear = do (y,_,_) <- toGregorian `fmap` getCurrentDay return y elapsedSeconds :: Fractional a => UTCTime -> UTCTime -> a elapsedSeconds t1 = realToFrac . diffUTCTime t1 spanStart :: DateSpan -> Maybe Day spanStart (DateSpan d _) = d spanEnd :: DateSpan -> Maybe Day spanEnd (DateSpan _ d) = d -- might be useful later: http://en.wikipedia.org/wiki/Allen%27s_interval_algebra -- | Get overall span enclosing multiple sequentially ordered spans. spansSpan :: [DateSpan] -> DateSpan spansSpan spans = DateSpan (maybe Nothing spanStart $ headMay spans) (maybe Nothing spanEnd $ lastMay spans) -- | Split a DateSpan into one or more consecutive whole spans of the specified length which enclose it. -- If no interval is specified, the original span is returned. 
-- -- ==== Examples: -- >>> let t i d1 d2 = splitSpan i $ mkdatespan d1 d2 -- >>> t NoInterval "2008/01/01" "2009/01/01" -- [DateSpan 2008] -- >>> t (Quarters 1) "2008/01/01" "2009/01/01" -- [DateSpan 2008q1,DateSpan 2008q2,DateSpan 2008q3,DateSpan 2008q4] -- >>> splitSpan (Quarters 1) nulldatespan -- [DateSpan -] -- >>> t (Days 1) "2008/01/01" "2008/01/01" -- an empty datespan -- [DateSpan 2008/01/01-2007/12/31] -- >>> t (Quarters 1) "2008/01/01" "2008/01/01" -- [DateSpan 2008/01/01-2007/12/31] -- >>> t (Months 1) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01,DateSpan 2008/02,DateSpan 2008/03] -- >>> t (Months 2) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01/01-2008/02/29,DateSpan 2008/03/01-2008/04/30] -- >>> t (Weeks 1) "2008/01/01" "2008/01/15" -- [DateSpan 2007/12/31w01,DateSpan 2008/01/07w02,DateSpan 2008/01/14w03] -- >>> t (Weeks 2) "2008/01/01" "2008/01/15" -- [DateSpan 2007/12/31-2008/01/13,DateSpan 2008/01/14-2008/01/27] -- >>> t (DayOfMonth 2) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01/02-2008/02/01,DateSpan 2008/02/02-2008/03/01,DateSpan 2008/03/02-2008/04/01] -- >>> t (DayOfWeek 2) "2011/01/01" "2011/01/15" -- [DateSpan 2011/01/04-2011/01/10,DateSpan 2011/01/11-2011/01/17] -- splitSpan :: Interval -> DateSpan -> [DateSpan] splitSpan _ (DateSpan Nothing Nothing) = [DateSpan Nothing Nothing] splitSpan NoInterval s = [s] splitSpan (Days n) s = splitspan startofday (applyN n nextday) s splitSpan (Weeks n) s = splitspan startofweek (applyN n nextweek) s splitSpan (Months n) s = splitspan startofmonth (applyN n nextmonth) s splitSpan (Quarters n) s = splitspan startofquarter (applyN n nextquarter) s splitSpan (Years n) s = splitspan startofyear (applyN n nextyear) s splitSpan (DayOfMonth n) s = splitspan (nthdayofmonthcontaining n) (applyN (n-1) nextday . nextmonth) s splitSpan (DayOfWeek n) s = splitspan (nthdayofweekcontaining n) (applyN (n-1) nextday . nextweek) s -- splitSpan (WeekOfYear n) s = splitspan startofweek (applyN n nextweek) s -- splitSpan (MonthOfYear n) s = splitspan startofmonth (applyN n nextmonth) s -- splitSpan (QuarterOfYear n) s = splitspan startofquarter (applyN n nextquarter) s -- Split the given span using the provided helper functions: -- start is applied to the span's start date to get the first sub-span's start date -- next is applied to a sub-span's start date to get the next sub-span's start date splitspan :: (Day -> Day) -> (Day -> Day) -> DateSpan -> [DateSpan] splitspan _ _ (DateSpan Nothing Nothing) = [] splitspan start next (DateSpan Nothing (Just e)) = splitspan start next (DateSpan (Just $ start e) (Just $ next $ start e)) splitspan start next (DateSpan (Just s) Nothing) = splitspan start next (DateSpan (Just $ start s) (Just $ next $ start s)) splitspan start next span@(DateSpan (Just s) (Just e)) | s == e = [span] | otherwise = splitspan' start next span where splitspan' start next (DateSpan (Just s) (Just e)) | s >= e = [] | otherwise = DateSpan (Just subs) (Just sube) : splitspan' start next (DateSpan (Just sube) (Just e)) where subs = start s sube = next subs splitspan' _ _ _ = error' "won't happen, avoids warnings" -- | Count the days in a DateSpan, or if it is open-ended return Nothing. daysInSpan :: DateSpan -> Maybe Integer daysInSpan (DateSpan (Just d1) (Just d2)) = Just $ diffDays d2 d1 daysInSpan _ = Nothing -- | Does the span include the given date ? 
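-- (The end date is exclusive.) An illustrative sketch:
--
-- > spanContainsDate (mkdatespan "2008/01/01" "2009/01/01") (parsedate "2008/06/15")  -- True
-- > spanContainsDate (mkdatespan "2008/01/01" "2009/01/01") (parsedate "2009/01/01")  -- False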
spanContainsDate :: DateSpan -> Day -> Bool spanContainsDate (DateSpan Nothing Nothing) _ = True spanContainsDate (DateSpan Nothing (Just e)) d = d < e spanContainsDate (DateSpan (Just b) Nothing) d = d >= b spanContainsDate (DateSpan (Just b) (Just e)) d = d >= b && d < e -- | Does the period include the given date ? -- (Here to avoid import cycle). periodContainsDate :: Period -> Day -> Bool periodContainsDate p = spanContainsDate (periodAsDateSpan p) -- | Calculate the intersection of a number of datespans. spansIntersect [] = nulldatespan spansIntersect [d] = d spansIntersect (d:ds) = d `spanIntersect` (spansIntersect ds) -- | Calculate the intersection of two datespans. spanIntersect (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = latest b1 b2 e = earliest e1 e2 -- | Fill any unspecified dates in the first span with the dates from -- the second one. Sort of a one-way spanIntersect. spanDefaultsFrom (DateSpan a1 b1) (DateSpan a2 b2) = DateSpan a b where a = if isJust a1 then a1 else a2 b = if isJust b1 then b1 else b2 -- | Calculate the union of a number of datespans. spansUnion [] = nulldatespan spansUnion [d] = d spansUnion (d:ds) = d `spanUnion` (spansUnion ds) -- | Calculate the union of two datespans. spanUnion (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = earliest b1 b2 e = latest e1 e2 latest d Nothing = d latest Nothing d = d latest (Just d1) (Just d2) = Just $ max d1 d2 earliest d Nothing = d earliest Nothing d = d earliest (Just d1) (Just d2) = Just $ min d1 d2 -- | Parse a period expression to an Interval and overall DateSpan using -- the provided reference date, or return a parse error. parsePeriodExpr :: Day -> Text -> Either (ParseError Char Dec) (Interval, DateSpan) parsePeriodExpr refdate = parsewith (periodexpr refdate <* eof) maybePeriod :: Day -> Text -> Maybe (Interval,DateSpan) maybePeriod refdate = either (const Nothing) Just . parsePeriodExpr refdate -- | Show a DateSpan as a human-readable pseudo-period-expression string. -- dateSpanAsText :: DateSpan -> String -- dateSpanAsText (DateSpan Nothing Nothing) = "all" -- dateSpanAsText (DateSpan Nothing (Just e)) = printf "to %s" (show e) -- dateSpanAsText (DateSpan (Just b) Nothing) = printf "from %s" (show b) -- dateSpanAsText (DateSpan (Just b) (Just e)) = printf "%s to %s" (show b) (show e) -- | Convert a single smart date string to a date span using the provided -- reference date, or raise an error. 
-- spanFromSmartDateString :: Day -> String -> DateSpan -- spanFromSmartDateString refdate s = spanFromSmartDate refdate sdate -- where -- sdate = fromparse $ parsewith smartdateonly s spanFromSmartDate :: Day -> SmartDate -> DateSpan spanFromSmartDate refdate sdate = DateSpan (Just b) (Just e) where (ry,rm,_) = toGregorian refdate (b,e) = span sdate span :: SmartDate -> (Day,Day) span ("","","today") = (refdate, nextday refdate) span ("","this","day") = (refdate, nextday refdate) span ("","","yesterday") = (prevday refdate, refdate) span ("","last","day") = (prevday refdate, refdate) span ("","","tomorrow") = (nextday refdate, addDays 2 refdate) span ("","next","day") = (nextday refdate, addDays 2 refdate) span ("","last","week") = (prevweek refdate, thisweek refdate) span ("","this","week") = (thisweek refdate, nextweek refdate) span ("","next","week") = (nextweek refdate, startofweek $ addDays 14 refdate) span ("","last","month") = (prevmonth refdate, thismonth refdate) span ("","this","month") = (thismonth refdate, nextmonth refdate) span ("","next","month") = (nextmonth refdate, startofmonth $ addGregorianMonthsClip 2 refdate) span ("","last","quarter") = (prevquarter refdate, thisquarter refdate) span ("","this","quarter") = (thisquarter refdate, nextquarter refdate) span ("","next","quarter") = (nextquarter refdate, startofquarter $ addGregorianMonthsClip 6 refdate) span ("","last","year") = (prevyear refdate, thisyear refdate) span ("","this","year") = (thisyear refdate, nextyear refdate) span ("","next","year") = (nextyear refdate, startofyear $ addGregorianYearsClip 2 refdate) span ("","",d) = (day, nextday day) where day = fromGregorian ry rm (read d) span ("",m,"") = (startofmonth day, nextmonth day) where day = fromGregorian ry (read m) 1 span ("",m,d) = (day, nextday day) where day = fromGregorian ry (read m) (read d) span (y,"","") = (startofyear day, nextyear day) where day = fromGregorian (read y) 1 1 span (y,m,"") = (startofmonth day, nextmonth day) where day = fromGregorian (read y) (read m) 1 span (y,m,d) = (day, nextday day) where day = fromGregorian (read y) (read m) (read d) -- showDay :: Day -> String -- showDay day = printf "%04d/%02d/%02d" y m d where (y,m,d) = toGregorian day -- | Convert a smart date string to an explicit yyyy\/mm\/dd string using -- the provided reference date, or raise an error. fixSmartDateStr :: Day -> Text -> String fixSmartDateStr d s = either (\e->error' $ printf "could not parse date %s %s" (show s) (show e)) id $ (fixSmartDateStrEither d s :: Either (ParseError Char Dec) String) -- | A safe version of fixSmartDateStr. fixSmartDateStrEither :: Day -> Text -> Either (ParseError Char Dec) String fixSmartDateStrEither d = either Left (Right . showDate) . fixSmartDateStrEither' d fixSmartDateStrEither' :: Day -> Text -> Either (ParseError Char Dec) Day fixSmartDateStrEither' d s = case parsewith smartdateonly (T.toLower s) of Right sd -> Right $ fixSmartDate d sd Left e -> Left e -- | Convert a SmartDate to an absolute date using the provided reference date. 
-- -- ==== Examples: -- >>> :set -XOverloadedStrings -- >>> let t = fixSmartDateStr (parsedate "2008/11/26") -- >>> t "0000-01-01" -- "0000/01/01" -- >>> t "1999-12-02" -- "1999/12/02" -- >>> t "1999.12.02" -- "1999/12/02" -- >>> t "1999/3/2" -- "1999/03/02" -- >>> t "19990302" -- "1999/03/02" -- >>> t "2008/2" -- "2008/02/01" -- >>> t "0020/2" -- "0020/02/01" -- >>> t "1000" -- "1000/01/01" -- >>> t "4/2" -- "2008/04/02" -- >>> t "2" -- "2008/11/02" -- >>> t "January" -- "2008/01/01" -- >>> t "feb" -- "2008/02/01" -- >>> t "today" -- "2008/11/26" -- >>> t "yesterday" -- "2008/11/25" -- >>> t "tomorrow" -- "2008/11/27" -- >>> t "this day" -- "2008/11/26" -- >>> t "last day" -- "2008/11/25" -- >>> t "next day" -- "2008/11/27" -- >>> t "this week" -- last monday -- "2008/11/24" -- >>> t "last week" -- previous monday -- "2008/11/17" -- >>> t "next week" -- next monday -- "2008/12/01" -- >>> t "this month" -- "2008/11/01" -- >>> t "last month" -- "2008/10/01" -- >>> t "next month" -- "2008/12/01" -- >>> t "this quarter" -- "2008/10/01" -- >>> t "last quarter" -- "2008/07/01" -- >>> t "next quarter" -- "2009/01/01" -- >>> t "this year" -- "2008/01/01" -- >>> t "last year" -- "2007/01/01" -- >>> t "next year" -- "2009/01/01" -- -- t "last wed" -- "2008/11/19" -- t "next friday" -- "2008/11/28" -- t "next january" -- "2009/01/01" -- fixSmartDate :: Day -> SmartDate -> Day fixSmartDate refdate sdate = fix sdate where fix :: SmartDate -> Day fix ("","","today") = fromGregorian ry rm rd fix ("","this","day") = fromGregorian ry rm rd fix ("","","yesterday") = prevday refdate fix ("","last","day") = prevday refdate fix ("","","tomorrow") = nextday refdate fix ("","next","day") = nextday refdate fix ("","last","week") = prevweek refdate fix ("","this","week") = thisweek refdate fix ("","next","week") = nextweek refdate fix ("","last","month") = prevmonth refdate fix ("","this","month") = thismonth refdate fix ("","next","month") = nextmonth refdate fix ("","last","quarter") = prevquarter refdate fix ("","this","quarter") = thisquarter refdate fix ("","next","quarter") = nextquarter refdate fix ("","last","year") = prevyear refdate fix ("","this","year") = thisyear refdate fix ("","next","year") = nextyear refdate fix ("","",d) = fromGregorian ry rm (read d) fix ("",m,"") = fromGregorian ry (read m) 1 fix ("",m,d) = fromGregorian ry (read m) (read d) fix (y,"","") = fromGregorian (read y) 1 1 fix (y,m,"") = fromGregorian (read y) (read m) 1 fix (y,m,d) = fromGregorian (read y) (read m) (read d) (ry,rm,rd) = toGregorian refdate prevday :: Day -> Day prevday = addDays (-1) nextday = addDays 1 startofday = id thisweek = startofweek prevweek = startofweek . addDays (-7) nextweek = startofweek . addDays 7 startofweek day = fromMondayStartWeek y w 1 where (y,_,_) = toGregorian day (w,_) = mondayStartWeek day thismonth = startofmonth prevmonth = startofmonth . addGregorianMonthsClip (-1) nextmonth = startofmonth . addGregorianMonthsClip 1 startofmonth day = fromGregorian y m 1 where (y,m,_) = toGregorian day thisquarter = startofquarter prevquarter = startofquarter . addGregorianMonthsClip (-3) nextquarter = startofquarter . addGregorianMonthsClip 3 startofquarter day = fromGregorian y (firstmonthofquarter m) 1 where (y,m,_) = toGregorian day firstmonthofquarter m = ((m-1) `div` 3) * 3 + 1 thisyear = startofyear prevyear = startofyear . addGregorianYearsClip (-1) nextyear = startofyear . 
addGregorianYearsClip 1 startofyear day = fromGregorian y 1 1 where (y,_,_) = toGregorian day nthdayofmonthcontaining n d | d1 >= d = d1 | otherwise = d2 where d1 = addDays (fromIntegral n-1) s d2 = addDays (fromIntegral n-1) $ nextmonth s s = startofmonth d nthdayofweekcontaining n d | d1 >= d = d1 | otherwise = d2 where d1 = addDays (fromIntegral n-1) s d2 = addDays (fromIntegral n-1) $ nextweek s s = startofweek d ---------------------------------------------------------------------- -- parsing -- -- | Parse a couple of date-time string formats to a time type. -- parsedatetimeM :: String -> Maybe LocalTime -- parsedatetimeM s = firstJust [ -- parseTime defaultTimeLocale "%Y/%m/%d %H:%M:%S" s, -- parseTime defaultTimeLocale "%Y-%m-%d %H:%M:%S" s -- ] parsetime :: ParseTime t => TimeLocale -> String -> String -> Maybe t parsetime = #if MIN_VERSION_time(1,5,0) parseTimeM True #else parseTime #endif -- | Parse a couple of date string formats to a time type. parsedateM :: String -> Maybe Day parsedateM s = firstJust [ parsetime defaultTimeLocale "%Y/%m/%d" s, parsetime defaultTimeLocale "%Y-%m-%d" s ] -- -- | Parse a date-time string to a time type, or raise an error. -- parsedatetime :: String -> LocalTime -- parsedatetime s = fromMaybe (error' $ "could not parse timestamp \"" ++ s ++ "\"") -- (parsedatetimeM s) -- | Parse a YYYY-MM-DD or YYYY/MM/DD date string to a Day, or raise an error. For testing/debugging. -- -- >>> parsedate "2008/02/03" -- 2008-02-03 parsedate :: String -> Day parsedate s = fromMaybe (error' $ "could not parse date \"" ++ s ++ "\"") (parsedateM s) -- doctests I haven't been able to make compatible with both GHC 7 and 8 -- -- >>> parsedate "2008/02/03/" -- -- *** Exception: could not parse date "2008/02/03/" -- #if MIN_VERSION_base(4,9,0) -- -- ... -- #endif -- #if MIN_VERSION_time(1,6,0) -- -- >>> parsedate "2008/02/30" -- with time >= 1.6, invalid dates are rejected -- -- *** Exception: could not parse date "2008/02/30" -- #if MIN_VERSION_base(4,9,0) -- -- ... -- #endif -- #else -- -- >>> parsedate "2008/02/30" -- with time < 1.6, they are silently adjusted -- -- 2008-02-29 -- #endif -- | Parse a time string to a time type using the provided pattern, or -- return the default. _parsetimewith :: ParseTime t => String -> String -> t -> t _parsetimewith pat s def = fromMaybe def $ parsetime defaultTimeLocale pat s {-| Parse a date in any of the formats allowed in ledger's period expressions, and maybe some others: > 2004 > 2004/10 > 2004/10/1 > 10/1 > 21 > october, oct > yesterday, today, tomorrow > this/next/last week/day/month/quarter/year Returns a SmartDate, to be converted to a full date later (see fixSmartDate). Assumes any text in the parse stream has been lowercased. -} smartdate :: Parser SmartDate smartdate = do -- XXX maybe obscures date errors ? see ledgerdate (y,m,d) <- choice' [yyyymmdd, ymd, ym, md, y, d, month, mon, today, yesterday, tomorrow, lastthisnextthing] return (y,m,d) -- | Like smartdate, but there must be nothing other than whitespace after the date. smartdateonly :: Parser SmartDate smartdateonly = do d <- smartdate many spacenonewline eof return d datesepchars :: [Char] datesepchars = "/-." 
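-- A usage sketch (illustrative only, assuming OverloadedStrings for the
-- Text argument): combining parsedateM and fixSmartDateStr, both defined
-- above, in GHCi; the expected result follows from fixSmartDate's examples:
--
-- > parsedateM "2008/11/26" >>= \d -> Just (fixSmartDateStr d "next quarter")
-- Just "2009/01/01"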
datesepchar :: TextParser m Char datesepchar = oneOf datesepchars validYear, validMonth, validDay :: String -> Bool validYear s = length s >= 4 && isJust (readMay s :: Maybe Year) validMonth s = maybe False (\n -> n>=1 && n<=12) $ readMay s validDay s = maybe False (\n -> n>=1 && n<=31) $ readMay s failIfInvalidYear, failIfInvalidMonth, failIfInvalidDay :: (Monad m) => String -> m () failIfInvalidYear s = unless (validYear s) $ fail $ "bad year number: " ++ s failIfInvalidMonth s = unless (validMonth s) $ fail $ "bad month number: " ++ s failIfInvalidDay s = unless (validDay s) $ fail $ "bad day number: " ++ s yyyymmdd :: Parser SmartDate yyyymmdd = do y <- count 4 digitChar m <- count 2 digitChar failIfInvalidMonth m d <- count 2 digitChar failIfInvalidDay d return (y,m,d) ymd :: Parser SmartDate ymd = do y <- some digitChar failIfInvalidYear y sep <- datesepchar m <- some digitChar failIfInvalidMonth m char sep d <- some digitChar failIfInvalidDay d return $ (y,m,d) ym :: Parser SmartDate ym = do y <- some digitChar failIfInvalidYear y datesepchar m <- some digitChar failIfInvalidMonth m return (y,m,"") y :: Parser SmartDate y = do y <- some digitChar failIfInvalidYear y return (y,"","") d :: Parser SmartDate d = do d <- some digitChar failIfInvalidDay d return ("","",d) md :: Parser SmartDate md = do m <- some digitChar failIfInvalidMonth m datesepchar d <- some digitChar failIfInvalidDay d return ("",m,d) months = ["january","february","march","april","may","june", "july","august","september","october","november","december"] monthabbrevs = ["jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"] -- weekdays = ["monday","tuesday","wednesday","thursday","friday","saturday","sunday"] -- weekdayabbrevs = ["mon","tue","wed","thu","fri","sat","sun"] monthIndex s = maybe 0 (+1) $ lowercase s `elemIndex` months monIndex s = maybe 0 (+1) $ lowercase s `elemIndex` monthabbrevs month :: Parser SmartDate month = do m <- choice $ map (try . string) months let i = monthIndex m return ("",show i,"") mon :: Parser SmartDate mon = do m <- choice $ map (try . 
string) monthabbrevs let i = monIndex m return ("",show i,"") today,yesterday,tomorrow :: Parser SmartDate today = string "today" >> return ("","","today") yesterday = string "yesterday" >> return ("","","yesterday") tomorrow = string "tomorrow" >> return ("","","tomorrow") lastthisnextthing :: Parser SmartDate lastthisnextthing = do r <- choice [ string "last" ,string "this" ,string "next" ] many spacenonewline -- make the space optional for easier scripting p <- choice [ string "day" ,string "week" ,string "month" ,string "quarter" ,string "year" ] -- XXX support these in fixSmartDate -- ++ (map string $ months ++ monthabbrevs ++ weekdays ++ weekdayabbrevs) return ("",r,p) -- | -- >>> let p = parsewith (periodexpr (parsedate "2008/11/26")) :: T.Text -> Either (ParseError Char Dec) (Interval, DateSpan) -- >>> p "from aug to oct" -- Right (NoInterval,DateSpan 2008/08/01-2008/09/30) -- >>> p "aug to oct" -- Right (NoInterval,DateSpan 2008/08/01-2008/09/30) -- >>> p "every 3 days in aug" -- Right (Days 3,DateSpan 2008/08) -- >>> p "daily from aug" -- Right (Days 1,DateSpan 2008/08/01-) -- >>> p "every week to 2009" -- Right (Weeks 1,DateSpan -2008/12/31) periodexpr :: Day -> Parser (Interval, DateSpan) periodexpr rdate = choice $ map try [ intervalanddateperiodexpr rdate, intervalperiodexpr, dateperiodexpr rdate, (return (NoInterval,DateSpan Nothing Nothing)) ] intervalanddateperiodexpr :: Day -> Parser (Interval, DateSpan) intervalanddateperiodexpr rdate = do many spacenonewline i <- reportinginterval many spacenonewline s <- periodexprdatespan rdate return (i,s) intervalperiodexpr :: Parser (Interval, DateSpan) intervalperiodexpr = do many spacenonewline i <- reportinginterval return (i, DateSpan Nothing Nothing) dateperiodexpr :: Day -> Parser (Interval, DateSpan) dateperiodexpr rdate = do many spacenonewline s <- periodexprdatespan rdate return (NoInterval, s) -- Parse a reporting interval. reportinginterval :: Parser Interval reportinginterval = choice' [ tryinterval "day" "daily" Days, tryinterval "week" "weekly" Weeks, tryinterval "month" "monthly" Months, tryinterval "quarter" "quarterly" Quarters, tryinterval "year" "yearly" Years, do string "biweekly" return $ Weeks 2, do string "bimonthly" return $ Months 2, do string "every" many spacenonewline n <- fmap read $ some digitChar thsuffix many spacenonewline string "day" many spacenonewline string "of" many spacenonewline string "week" return $ DayOfWeek n, do string "every" many spacenonewline n <- fmap read $ some digitChar thsuffix many spacenonewline string "day" optional $ do many spacenonewline string "of" many spacenonewline string "month" return $ DayOfMonth n ] where thsuffix = choice' $ map string ["st","nd","rd","th"] -- Parse any of several variants of a basic interval, eg "daily", "every day", "every N days". 
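-- For illustration: with the definition below, tryinterval "day" "daily" Days
-- accepts "daily", "every day" and "every 3 days", yielding Days 1, Days 1
-- and Days 3 respectively (cf. the "every 3 days in aug" doctest for
-- periodexpr above).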
tryinterval :: String -> String -> (Int -> Interval) -> Parser Interval tryinterval singular compact intcons = choice' [ do string compact return $ intcons 1, do string "every" many spacenonewline string singular return $ intcons 1, do string "every" many spacenonewline n <- fmap read $ some digitChar many spacenonewline string plural return $ intcons n ] where plural = singular ++ "s" periodexprdatespan :: Day -> Parser DateSpan periodexprdatespan rdate = choice $ map try [ doubledatespan rdate, fromdatespan rdate, todatespan rdate, justdatespan rdate ] doubledatespan :: Day -> Parser DateSpan doubledatespan rdate = do optional (string "from" >> many spacenonewline) b <- smartdate many spacenonewline optional (choice [string "to", string "-"] >> many spacenonewline) e <- smartdate return $ DateSpan (Just $ fixSmartDate rdate b) (Just $ fixSmartDate rdate e) fromdatespan :: Day -> Parser DateSpan fromdatespan rdate = do b <- choice [ do string "from" >> many spacenonewline smartdate , do d <- smartdate string "-" return d ] return $ DateSpan (Just $ fixSmartDate rdate b) Nothing todatespan :: Day -> Parser DateSpan todatespan rdate = do choice [string "to", string "-"] >> many spacenonewline e <- smartdate return $ DateSpan Nothing (Just $ fixSmartDate rdate e) justdatespan :: Day -> Parser DateSpan justdatespan rdate = do optional (string "in" >> many spacenonewline) d <- smartdate return $ spanFromSmartDate rdate d -- | Make a datespan from two valid date strings parseable by parsedate -- (or raise an error). Eg: mkdatespan \"2011/1/1\" \"2011/12/31\". mkdatespan :: String -> String -> DateSpan mkdatespan b = DateSpan (Just $ parsedate b) . Just . parsedate nulldatespan :: DateSpan nulldatespan = DateSpan Nothing Nothing nulldate :: Day nulldate = fromGregorian 0 1 1 hledger-lib-1.2/Hledger/Data/Journal.hs0000644000000000000000000012716513066173044016131 0ustar0000000000000000{-# LANGUAGE Rank2Types #-} {-# LANGUAGE StandaloneDeriving, OverloadedStrings #-} {-| A 'Journal' is a set of transactions, plus optional related data. This is hledger's primary data object. It is usually parsed from a journal file or other data format (see "Hledger.Read"). 
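For a small concrete example, see 'samplejournal' near the end of this
module: a five-transaction journal, similar to examples/sample.journal,
used for testing.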
-} module Hledger.Data.Journal ( -- * Parsing helpers addMarketPrice, addModifierTransaction, addPeriodicTransaction, addTransaction, journalApplyAliases, journalBalanceTransactions, journalApplyCommodityStyles, commodityStylesFromAmounts, journalConvertAmountsToCost, journalFinalise, -- * Filtering filterJournalTransactions, filterJournalPostings, filterJournalAmounts, filterTransactionAmounts, filterTransactionPostings, filterPostingAmount, -- * Querying journalAccountNames, journalAccountNamesUsed, -- journalAmountAndPriceCommodities, journalAmounts, overJournalAmounts, traverseJournalAmounts, -- journalCanonicalCommodities, journalDateSpan, journalDescriptions, journalFilePath, journalFilePaths, journalTransactionAt, journalNextTransaction, journalPrevTransaction, journalPostings, -- * Standard account types journalBalanceSheetAccountQuery, journalProfitAndLossAccountQuery, journalIncomeAccountQuery, journalExpenseAccountQuery, journalAssetAccountQuery, journalLiabilityAccountQuery, journalEquityAccountQuery, journalCashAccountQuery, -- * Misc canonicalStyleFrom, matchpats, nulljournal, journalCheckBalanceAssertions, journalNumberAndTieTransactions, journalUntieTransactions, -- * Tests samplejournal, tests_Hledger_Data_Journal, ) where import Control.Applicative (Const(..)) import Control.Arrow import Control.Monad import Control.Monad.Except import qualified Control.Monad.Reader as R import Control.Monad.ST import Data.Array.ST import Data.Functor.Identity (Identity(..)) import qualified Data.HashTable.ST.Cuckoo as HT import Data.List -- import Data.Map (findWithDefault) import Data.Maybe import Data.Monoid import Data.Ord import Data.Text (Text) import qualified Data.Text as T import Safe (headMay, headDef) import Data.Time.Calendar import Data.Tree import System.Time (ClockTime(TOD)) import Test.HUnit import Text.Printf import qualified Data.Map as M import Hledger.Utils import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount -- import Hledger.Data.Commodity import Hledger.Data.Dates import Hledger.Data.Transaction import Hledger.Data.Posting import Hledger.Query -- try to make Journal ppShow-compatible -- instance Show ClockTime where -- show t = "" -- deriving instance Show Journal instance Show Journal where show j | debugLevel < 3 = printf "Journal %s with %d transactions, %d accounts" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) | debugLevel < 6 = printf "Journal %s with %d transactions, %d accounts: %s" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) (show accounts) | otherwise = printf "Journal %s with %d transactions, %d accounts: %s, commodity styles: %s" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) (show accounts) (show $ jinferredcommodities j) -- ++ (show $ journalTransactions l) where accounts = filter (/= "root") $ flatten $ journalAccountNameTree j -- showJournalDebug j = unlines [ -- show j -- ,show (jtxns j) -- ,show (jmodifiertxns j) -- ,show (jperiodictxns j) -- ,show $ jparsetimeclockentries j -- ,show $ jmarketprices j -- ,show $ jfinalcommentlines j -- ,show $ jparsestate j -- ,show $ map fst $ jfiles j -- ] -- The monoid instance for Journal is useful for two situations. -- -- 1. concatenating finalised journals, eg with multiple -f options: -- FIRST <> SECOND. 
The second's list fields are appended to the -- first's, map fields are combined, transaction counts are summed, -- the parse state of the second is kept. -- -- 2. merging a child parsed journal, eg with the include directive: -- CHILD <> PARENT. A parsed journal's data is in reverse order, so -- this gives what we want. -- instance Monoid Journal where mempty = nulljournal mappend j1 j2 = Journal { jparsedefaultyear = jparsedefaultyear j2 ,jparsedefaultcommodity = jparsedefaultcommodity j2 ,jparseparentaccounts = jparseparentaccounts j2 ,jparsealiases = jparsealiases j2 -- ,jparsetransactioncount = jparsetransactioncount j1 + jparsetransactioncount j2 ,jparsetimeclockentries = jparsetimeclockentries j1 <> jparsetimeclockentries j2 ,jaccounts = jaccounts j1 <> jaccounts j2 ,jcommodities = jcommodities j1 <> jcommodities j2 ,jinferredcommodities = jinferredcommodities j1 <> jinferredcommodities j2 ,jmarketprices = jmarketprices j1 <> jmarketprices j2 ,jmodifiertxns = jmodifiertxns j1 <> jmodifiertxns j2 ,jperiodictxns = jperiodictxns j1 <> jperiodictxns j2 ,jtxns = jtxns j1 <> jtxns j2 ,jfinalcommentlines = jfinalcommentlines j2 ,jfiles = jfiles j1 <> jfiles j2 ,jlastreadtime = max (jlastreadtime j1) (jlastreadtime j2) } nulljournal :: Journal nulljournal = Journal { jparsedefaultyear = Nothing ,jparsedefaultcommodity = Nothing ,jparseparentaccounts = [] ,jparsealiases = [] -- ,jparsetransactioncount = 0 ,jparsetimeclockentries = [] ,jaccounts = [] ,jcommodities = M.fromList [] ,jinferredcommodities = M.fromList [] ,jmarketprices = [] ,jmodifiertxns = [] ,jperiodictxns = [] ,jtxns = [] ,jfinalcommentlines = "" ,jfiles = [] ,jlastreadtime = TOD 0 0 } journalFilePath :: Journal -> FilePath journalFilePath = fst . mainfile journalFilePaths :: Journal -> [FilePath] journalFilePaths = map fst . jfiles mainfile :: Journal -> (FilePath, Text) mainfile = headDef ("", "") . jfiles addTransaction :: Transaction -> Journal -> Journal addTransaction t j = j { jtxns = t : jtxns j } addModifierTransaction :: ModifierTransaction -> Journal -> Journal addModifierTransaction mt j = j { jmodifiertxns = mt : jmodifiertxns j } addPeriodicTransaction :: PeriodicTransaction -> Journal -> Journal addPeriodicTransaction pt j = j { jperiodictxns = pt : jperiodictxns j } addMarketPrice :: MarketPrice -> Journal -> Journal addMarketPrice h j = j { jmarketprices = h : jmarketprices j } -- | Get the transaction with this index (its 1-based position in the input stream), if any. journalTransactionAt :: Journal -> Integer -> Maybe Transaction journalTransactionAt Journal{jtxns=ts} i = -- it's probably ts !! (i+1), but we won't assume headMay [t | t <- ts, tindex t == i] -- | Get the transaction that appeared immediately after this one in the input stream, if any. journalNextTransaction :: Journal -> Transaction -> Maybe Transaction journalNextTransaction j t = journalTransactionAt j (tindex t + 1) -- | Get the transaction that appeared immediately before this one in the input stream, if any. journalPrevTransaction :: Journal -> Transaction -> Maybe Transaction journalPrevTransaction j t = journalTransactionAt j (tindex t - 1) -- | Unique transaction descriptions used in this journal. journalDescriptions :: Journal -> [Text] journalDescriptions = nub . sort . map tdescription . jtxns -- | All postings from this journal's transactions, in order. journalPostings :: Journal -> [Posting] journalPostings = concatMap tpostings . jtxns -- | Unique account names posted to in this journal. 
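-- A sketch of the expected result (assuming accountNamesFromPostings
-- returns each posted-to account name once), using the samplejournal
-- defined at the end of this module:
--
-- > journalAccountNamesUsed samplejournal
--
-- would list assets:bank:checking, assets:bank:saving, assets:cash,
-- expenses:food, expenses:supplies, income:gifts, income:salary and
-- liabilities:debts, in that (sorted) order.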
journalAccountNamesUsed :: Journal -> [AccountName] journalAccountNamesUsed = sort . accountNamesFromPostings . journalPostings -- | Unique account names in this journal, including parent accounts containing no postings. journalAccountNames :: Journal -> [AccountName] journalAccountNames = sort . expandAccountNames . journalAccountNamesUsed journalAccountNameTree :: Journal -> Tree AccountName journalAccountNameTree = accountNameTreeFrom . journalAccountNames -- standard account types -- | A query for Profit & Loss accounts in this journal. -- Cf . journalProfitAndLossAccountQuery :: Journal -> Query journalProfitAndLossAccountQuery j = Or [journalIncomeAccountQuery j ,journalExpenseAccountQuery j ] -- | A query for Income (Revenue) accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^(income|revenue)s?(:|$)@. journalIncomeAccountQuery :: Journal -> Query journalIncomeAccountQuery _ = Acct "^(income|revenue)s?(:|$)" -- | A query for Expense accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^expenses?(:|$)@. journalExpenseAccountQuery :: Journal -> Query journalExpenseAccountQuery _ = Acct "^expenses?(:|$)" -- | A query for Asset, Liability & Equity accounts in this journal. -- Cf . journalBalanceSheetAccountQuery :: Journal -> Query journalBalanceSheetAccountQuery j = Or [journalAssetAccountQuery j ,journalLiabilityAccountQuery j ,journalEquityAccountQuery j ] -- | A query for Asset accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^assets?(:|$)@. journalAssetAccountQuery :: Journal -> Query journalAssetAccountQuery _ = Acct "^assets?(:|$)" -- | A query for Liability accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^(debts?|liabilit(y|ies))(:|$)@. journalLiabilityAccountQuery :: Journal -> Query journalLiabilityAccountQuery _ = Acct "^(debts?|liabilit(y|ies))(:|$)" -- | A query for Equity accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^equity(:|$)@. journalEquityAccountQuery :: Journal -> Query journalEquityAccountQuery _ = Acct "^equity(:|$)" -- | A query for Cash (-equivalent) accounts in this journal (ie, -- accounts which appear on the cashflow statement.) This is currently -- hard-coded to be all the Asset accounts except for those containing the -- case-insensitive regex @(receivable|A/R)@. journalCashAccountQuery :: Journal -> Query journalCashAccountQuery j = And [journalAssetAccountQuery j, Not $ Acct "(receivable|A/R)"] -- Various kinds of filtering on journals. We do it differently depending -- on the command. ------------------------------------------------------------------------------- -- filtering V2 -- | Keep only transactions matching the query expression. filterJournalTransactions :: Query -> Journal -> Journal filterJournalTransactions q j@Journal{jtxns=ts} = j{jtxns=filter (q `matchesTransaction`) ts} -- | Keep only postings matching the query expression. -- This can leave unbalanced transactions. filterJournalPostings :: Query -> Journal -> Journal filterJournalPostings q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionPostings q) ts} -- | Within each posting's amount, keep only the parts matching the query. -- This can leave unbalanced transactions. 
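-- (For example, ledgerFromJournal in Hledger.Data.Ledger passes the query's
-- sym: terms to this function, to drop amount parts in commodities the
-- query would exclude.)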
filterJournalAmounts :: Query -> Journal -> Journal filterJournalAmounts q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionAmounts q) ts} -- | Filter out all parts of this transaction's amounts which do not match the query. -- This can leave the transaction unbalanced. filterTransactionAmounts :: Query -> Transaction -> Transaction filterTransactionAmounts q t@Transaction{tpostings=ps} = t{tpostings=map (filterPostingAmount q) ps} -- | Filter out all parts of this posting's amount which do not match the query. filterPostingAmount :: Query -> Posting -> Posting filterPostingAmount q p@Posting{pamount=Mixed as} = p{pamount=Mixed $ filter (q `matchesAmount`) as} filterTransactionPostings :: Query -> Transaction -> Transaction filterTransactionPostings q t@Transaction{tpostings=ps} = t{tpostings=filter (q `matchesPosting`) ps} {- ------------------------------------------------------------------------------- -- filtering V1 -- | Keep only transactions we are interested in, as described by the -- filter specification. filterJournalTransactions :: FilterSpec -> Journal -> Journal filterJournalTransactions FilterSpec{datespan=datespan ,cleared=cleared -- ,real=real -- ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalTransactionsByClearedStatus cleared . filterJournalPostingsByDepth depth . filterJournalTransactionsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . filterJournalTransactionsByDate datespan -- | Keep only postings we are interested in, as described by the filter -- specification. This can leave unbalanced transactions. filterJournalPostings :: FilterSpec -> Journal -> Journal filterJournalPostings FilterSpec{datespan=datespan ,cleared=cleared ,real=real ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalPostingsByRealness real . filterJournalPostingsByClearedStatus cleared . filterJournalPostingsByEmpty empty . filterJournalPostingsByDepth depth . filterJournalPostingsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . filterJournalTransactionsByDate datespan -- | Keep only transactions whose metadata matches all metadata specifications. filterJournalTransactionsByMetadata :: [(String,String)] -> Journal -> Journal filterJournalTransactionsByMetadata pats j@Journal{jtxns=ts} = j{jtxns=filter matchmd ts} where matchmd t = all (`elem` tmetadata t) pats -- | Keep only transactions whose description matches the description patterns. filterJournalTransactionsByDescription :: [String] -> Journal -> Journal filterJournalTransactionsByDescription pats j@Journal{jtxns=ts} = j{jtxns=filter matchdesc ts} where matchdesc = matchpats pats . tdescription -- | Keep only transactions which fall between begin and end dates. -- We include transactions on the begin date and exclude transactions on the end -- date, like ledger. An empty date string means no restriction. filterJournalTransactionsByDate :: DateSpan -> Journal -> Journal filterJournalTransactionsByDate (DateSpan begin end) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match t = maybe True (tdate t>=) begin && maybe True (tdate t<) end -- | Keep only transactions which have the requested cleared/uncleared -- status, if there is one. 
filterJournalTransactionsByClearedStatus :: Maybe Bool -> Journal -> Journal filterJournalTransactionsByClearedStatus Nothing j = j filterJournalTransactionsByClearedStatus (Just val) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match = (==val).tstatus -- | Keep only postings which have the requested cleared/uncleared status, -- if there is one. filterJournalPostingsByClearedStatus :: Maybe Bool -> Journal -> Journal filterJournalPostingsByClearedStatus Nothing j = j filterJournalPostingsByClearedStatus (Just c) j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter ((==c) . postingCleared) ps} -- | Strip out any virtual postings, if the flag is true, otherwise do -- no filtering. filterJournalPostingsByRealness :: Bool -> Journal -> Journal filterJournalPostingsByRealness False j = j filterJournalPostingsByRealness True j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter isReal ps} -- | Strip out any postings with zero amount, unless the flag is true. filterJournalPostingsByEmpty :: Bool -> Journal -> Journal filterJournalPostingsByEmpty True j = j filterJournalPostingsByEmpty False j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (not . isEmptyPosting) ps} -- -- | Keep only transactions which affect accounts deeper than the specified depth. -- filterJournalTransactionsByDepth :: Maybe Int -> Journal -> Journal -- filterJournalTransactionsByDepth Nothing j = j -- filterJournalTransactionsByDepth (Just d) j@Journal{jtxns=ts} = -- j{jtxns=(filter (any ((<= d+1) . accountNameLevel . paccount) . tpostings) ts)} -- | Strip out any postings to accounts deeper than the specified depth -- (and any transactions which have no postings as a result). filterJournalPostingsByDepth :: Maybe Int -> Journal -> Journal filterJournalPostingsByDepth Nothing j = j filterJournalPostingsByDepth (Just d) j@Journal{jtxns=ts} = j{jtxns=filter (not . null . tpostings) $ map filtertxns ts} where filtertxns t@Transaction{tpostings=ps} = t{tpostings=filter ((<= d) . accountNameLevel . paccount) ps} -- | Keep only postings which affect accounts matched by the account patterns. -- This can leave transactions unbalanced. filterJournalPostingsByAccount :: [String] -> Journal -> Journal filterJournalPostingsByAccount apats j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (matchpats apats . paccount) ps} -- | Keep only transactions which affect accounts matched by the account patterns. -- More precisely: each positive account pattern excludes transactions -- which do not contain a posting to a matched account, and each negative -- account pattern excludes transactions containing a posting to a matched -- account. filterJournalTransactionsByAccount :: [String] -> Journal -> Journal filterJournalTransactionsByAccount apats j@Journal{jtxns=ts} = j{jtxns=filter tmatch ts} where tmatch t = (null positives || any positivepmatch ps) && (null negatives || not (any negativepmatch ps)) where ps = tpostings t positivepmatch p = any (`amatch` a) positives where a = paccount p negativepmatch p = any (`amatch` a) negatives where a = paccount p amatch pat a = regexMatchesCI (abspat pat) a (negatives,positives) = partition isnegativepat apats -} -- | Apply additional account aliases (eg from the command-line) to all postings in a journal. 
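-- (Each posting's account name is rewritten with accountNameApplyAliases;
-- the transactions are otherwise left unchanged.)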
journalApplyAliases :: [AccountAlias] -> Journal -> Journal journalApplyAliases aliases j@Journal{jtxns=ts} = -- (if null aliases -- then id -- else (dbgtrace $ -- "applying additional command-line aliases:\n" -- ++ chomp (unlines $ map (" "++) $ lines $ ppShow aliases))) $ j{jtxns=map dotransaction ts} where dotransaction t@Transaction{tpostings=ps} = t{tpostings=map doposting ps} doposting p@Posting{paccount=a} = p{paccount= accountNameApplyAliases aliases a} -- | Do post-parse processing on a parsed journal to make it ready for -- use. Reverse parsed data to normal order, canonicalise amount -- formats, check/ensure that transactions are balanced, and maybe -- check balance assertions. journalFinalise :: ClockTime -> FilePath -> Text -> Bool -> ParsedJournal -> Either String Journal journalFinalise t path txt assrt j@Journal{jfiles=fs} = do (journalTieTransactions <$> (journalBalanceTransactions assrt $ journalApplyCommodityStyles $ j{ jfiles = (path,txt) : reverse fs , jlastreadtime = t , jtxns = reverse $ jtxns j -- NOTE: see addTransaction , jmodifiertxns = reverse $ jmodifiertxns j -- NOTE: see addModifierTransaction , jperiodictxns = reverse $ jperiodictxns j -- NOTE: see addPeriodicTransaction , jmarketprices = reverse $ jmarketprices j -- NOTE: see addMarketPrice })) journalNumberAndTieTransactions = journalTieTransactions . journalNumberTransactions -- | Number (set the tindex field) this journal's transactions, counting upward from 1. journalNumberTransactions :: Journal -> Journal journalNumberTransactions j@Journal{jtxns=ts} = j{jtxns=map (\(i,t) -> t{tindex=i}) $ zip [1..] ts} -- | Tie the knot in all of this journal's transactions, ensuring their postings -- refer to them. This should be done last, after any other transaction-modifying operations. journalTieTransactions :: Journal -> Journal journalTieTransactions j@Journal{jtxns=ts} = j{jtxns=map txnTieKnot ts} -- | Untie all transaction-posting knots in this journal, so that eg -- recursiveSize and GHCI's :sprint can work on it. journalUntieTransactions :: Transaction -> Transaction journalUntieTransactions t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Check any balance assertions in the journal and return an error -- message if any of them fail. journalCheckBalanceAssertions :: Journal -> Either String Journal journalCheckBalanceAssertions j = runST $ journalBalanceTransactionsST True j (return ()) (\_ _ -> return ()) (const $ return j) -- noops -- | Check a posting's balance assertion and return an error if it -- fails. checkBalanceAssertion :: Posting -> MixedAmount -> Either String () checkBalanceAssertion p@Posting{ pbalanceassertion = Just ass} amt | isReallyZeroAmount diff = Right () | True = Left err where assertedcomm = acommodity ass actualbal = fromMaybe nullamt $ find ((== assertedcomm) . 
acommodity) (amounts amt) diff = ass - actualbal diffplus | isNegativeAmount diff == False = "+" | otherwise = "" err = printf (unlines [ "balance assertion error%s", "after posting:", "%s", "balance assertion details:", "date: %s", "account: %s", "commodity: %s", "calculated: %s", "asserted: %s (difference: %s)" ]) (case ptransaction p of Nothing -> ":" -- shouldn't happen Just t -> printf " in %s:\nin transaction:\n%s" (showGenericSourcePos $ tsourcepos t) (chomp $ show t) :: String) (showPostingLine p) (showDate $ postingDate p) (T.unpack $ paccount p) -- XXX pack assertedcomm (showAmount actualbal) (showAmount ass) (diffplus ++ showAmount diff) checkBalanceAssertion _ _ = Right () -- | Environment for 'CurrentBalancesModifier' data Env s = Env { eBalances :: HT.HashTable s AccountName MixedAmount , eStoreTx :: Transaction -> ST s () , eAssrt :: Bool , eStyles :: Maybe (M.Map CommoditySymbol AmountStyle) } -- | Monad transformer stack with a reference to a mutable hashtable -- of current account balances and a mutable array of finished -- transactions in original parsing order. type CurrentBalancesModifier s = R.ReaderT (Env s) (ExceptT String (ST s)) -- | Fill in any missing amounts and check that all journal transactions -- balance, or return an error message. This is done after parsing all -- amounts and applying canonical commodity styles, since balancing -- depends on display precision. Reports only the first error encountered. journalBalanceTransactions :: Bool -> Journal -> Either String Journal journalBalanceTransactions assrt j = runST $ journalBalanceTransactionsST assrt (journalNumberTransactions j) (newArray_ (1, genericLength $ jtxns j) :: forall s. ST s (STArray s Integer Transaction)) (\arr tx -> writeArray arr (tindex tx) tx) $ fmap (\txns -> j{ jtxns = txns}) . getElems -- | Generalization used in the definition of -- 'journalBalanceTransactionsST and 'journalCheckBalanceAssertions' journalBalanceTransactionsST :: Bool -> Journal -> ST s txns -- ^ creates transaction store -> (txns -> Transaction -> ST s ()) -- ^ "store" operation -> (txns -> ST s a) -- ^ calculate result from transactions -> ST s (Either String a) journalBalanceTransactionsST assrt j createStore storeIn extract = runExceptT $ do bals <- lift $ HT.newSized size txStore <- lift $ createStore flip R.runReaderT (Env bals (storeIn txStore) assrt $ Just $ jinferredcommodities j) $ do dated <- fmap snd . sortBy (comparing fst) . concat <$> mapM' discriminateByDate (jtxns j) mapM' checkInferAndRegisterAmounts dated lift $ extract txStore where size = genericLength $ journalPostings j -- | This converts a transaction into a list of objects whose dates -- have to be considered when checking balance assertions and handled -- by 'checkInferAndRegisterAmounts'. -- -- Transaction without balance assignments can be balanced and stored -- immediately and their (possibly) dated postings are returned. -- -- Transaction with balance assignments are only supported if no -- posting has a 'pdate' value. Supported transactions will be -- returned unchanged and balanced and stored later in 'checkInferAndRegisterAmounts'. discriminateByDate :: Transaction -> CurrentBalancesModifier s [(Day, Either Posting Transaction)] discriminateByDate tx | null (assignmentPostings tx) = do styles <- R.reader $ eStyles balanced <- lift $ ExceptT $ return $ balanceTransaction styles tx storeTransaction balanced return $ fmap (postingDate &&& (Left . removePrices)) $ tpostings $ balanced | True = do when (any (isJust . 
pdate) $ tpostings tx) $ throwError $ unlines $ ["Not supported: Transactions with balance assignments " ,"AND dated postings without amount:\n" , showTransaction tx] return [(tdate tx, Right $ tx { tpostings = removePrices <$> tpostings tx })] -- | This function takes different objects describing changes to -- account balances on a single day. It can handle either a single -- posting (from an already balanced transaction without assigments) -- or a whole transaction with assignments (which is required to no -- posting with pdate set.). -- -- For a single posting, there is not much to do. Only add its amount -- to its account and check the assertion, if there is one. This -- functionality is provided by 'addAmountAndCheckBalance'. -- -- For a whole transaction, it loops over all postings, and performs -- 'addAmountAndCheckBalance', if there is an amount. If there is no -- amount, the amount is inferred by the assertion or left empty if -- there is no assertion. Then, the transaction is balanced, the -- inferred amount added to the balance (all in -- 'balanceTransactionUpdate') and the resulting transaction with no -- missing amounts is stored in the array, for later retrieval. -- -- Again in short: -- -- 'Left Posting': Check the balance assertion and update the -- account balance. If the amount is empty do nothing. this can be -- the case e.g. for virtual postings -- -- 'Right Transaction': Loop over all postings, infer their amounts -- and then balance and store the transaction. checkInferAndRegisterAmounts :: Either Posting Transaction -> CurrentBalancesModifier s () checkInferAndRegisterAmounts (Left p) = void $ addAmountAndCheckBalance return p checkInferAndRegisterAmounts (Right oldTx) = do let ps = tpostings oldTx styles <- R.reader $ eStyles newPostings <- forM ps $ addAmountAndCheckBalance inferFromAssignment storeTransaction =<< balanceTransactionUpdate (fmap void . addToBalance) styles oldTx { tpostings = newPostings } where inferFromAssignment :: Posting -> CurrentBalancesModifier s Posting inferFromAssignment p = maybe (return p) (fmap (\a -> p { pamount = a, porigin = Just $ originalPosting p }) . setBalance (paccount p)) $ pbalanceassertion p -- | Adds a posting's amonut to the posting's account balance and -- checks a possible balance assertion. If there is no amount, it runs -- the supplied fallback action. addAmountAndCheckBalance :: (Posting -> CurrentBalancesModifier s Posting) -- ^ action to execute, if posting has no amount -> Posting -> CurrentBalancesModifier s Posting addAmountAndCheckBalance _ p | hasAmount p = do newAmt <- addToBalance (paccount p) $ pamount p assrt <- R.reader eAssrt lift $ when assrt $ ExceptT $ return $ checkBalanceAssertion p newAmt return p addAmountAndCheckBalance fallback p = fallback p -- | Sets an account's balance to a given amount and returns the -- difference of new and old amount setBalance :: AccountName -> Amount -> CurrentBalancesModifier s MixedAmount setBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do old <- HT.lookup bals acc let new = Mixed $ (amt :) $ maybe [] (filter ((/= acommodity amt) . acommodity) . 
amounts) old HT.insert bals acc new return $ maybe new (new -) old -- | Adds an amount to an account's balance and returns the resulting -- balance addToBalance :: AccountName -> MixedAmount -> CurrentBalancesModifier s MixedAmount addToBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do new <- maybe amt (+ amt) <$> HT.lookup bals acc HT.insert bals acc new return new -- | Stores a transaction in the transaction array in original parsing -- order. storeTransaction :: Transaction -> CurrentBalancesModifier s () storeTransaction tx = liftModifier $ ($tx) . eStoreTx -- | Helper function liftModifier :: (Env s -> ST s a) -> CurrentBalancesModifier s a liftModifier f = R.ask >>= lift . lift . f -- | Choose and apply a consistent display format to the posting -- amounts in each commodity. Each commodity's format is specified by -- a commodity format directive, or otherwise inferred from posting -- amounts as in hledger < 0.28. journalApplyCommodityStyles :: Journal -> Journal journalApplyCommodityStyles j@Journal{jtxns=ts, jmarketprices=mps} = j'' where j' = journalInferCommodityStyles j j'' = j'{jtxns=map fixtransaction ts, jmarketprices=map fixmarketprice mps} fixtransaction t@Transaction{tpostings=ps} = t{tpostings=map fixposting ps} fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} fixmarketprice mp@MarketPrice{mpamount=a} = mp{mpamount=fixamount a} fixmixedamount (Mixed as) = Mixed $ map fixamount as fixamount a@Amount{acommodity=c} = a{astyle=journalCommodityStyle j' c} -- | Get this journal's standard display style for the given -- commodity. That is the style defined by the last corresponding -- commodity format directive if any, otherwise the style inferred -- from the posting amounts (or in some cases, price amounts) in this -- commodity if any, otherwise the default style. journalCommodityStyle :: Journal -> CommoditySymbol -> AmountStyle journalCommodityStyle j c = headDef amountstyle{asprecision=2} $ catMaybes [ M.lookup c (jcommodities j) >>= cformat ,M.lookup c $ jinferredcommodities j ] -- | Infer a display format for each commodity based on the amounts parsed. -- "hledger... will use the format of the first posting amount in the -- commodity, and the highest precision of all posting amounts in the commodity." journalInferCommodityStyles :: Journal -> Journal journalInferCommodityStyles j = j{jinferredcommodities = commodityStylesFromAmounts $ dbg8 "journalChooseCommmodityStyles using amounts" $ journalAmounts j} -- | Given a list of amounts in parse order, build a map from their commodity names -- to standard commodity display formats. commodityStylesFromAmounts :: [Amount] -> M.Map CommoditySymbol AmountStyle commodityStylesFromAmounts amts = M.fromList commstyles where samecomm = \a1 a2 -> acommodity a1 == acommodity a2 commamts = [(acommodity $ head as, as) | as <- groupBy samecomm $ sortBy (comparing acommodity) amts] commstyles = [(c, canonicalStyleFrom $ map astyle as) | (c,as) <- commamts] -- | Given an ordered list of amount styles, choose a canonical style. -- That is: the style of the first, and the -- maximum precision of all. canonicalStyleFrom :: [AmountStyle] -> AmountStyle canonicalStyleFrom [] = amountstyle canonicalStyleFrom ss@(first:_) = first{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} where mgrps = maybe Nothing Just $ headMay $ catMaybes $ map asdigitgroups ss -- precision is maximum of all precisions prec = maximumStrict $ map asprecision ss mdec = Just $ headDef '.' 
$ catMaybes $ map asdecimalpoint ss -- precision is that of first amount with a decimal point -- (mdec, prec) = -- case filter (isJust . asdecimalpoint) ss of -- (s:_) -> (asdecimalpoint s, asprecision s) -- [] -> (Just '.', 0) -- -- | Apply this journal's historical price records to unpriced amounts where possible. -- journalApplyMarketPrices :: Journal -> Journal -- journalApplyMarketPrices j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts} -- where -- fixtransaction t@Transaction{tdate=d, tpostings=ps} = t{tpostings=map fixposting ps} -- where -- fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} -- fixmixedamount (Mixed as) = Mixed $ map fixamount as -- fixamount = fixprice -- fixprice a@Amount{price=Just _} = a -- fixprice a@Amount{commodity=c} = a{price=maybe Nothing (Just . UnitPrice) $ journalMarketPriceFor j d c} -- -- | Get the price for a commodity on the specified day from the price database, if known. -- -- Does only one lookup step, ie will not look up the price of a price. -- journalMarketPriceFor :: Journal -> Day -> CommoditySymbol -> Maybe MixedAmount -- journalMarketPriceFor j d CommoditySymbol{symbol=s} = do -- let ps = reverse $ filter ((<= d).mpdate) $ filter ((s==).hsymbol) $ sortBy (comparing mpdate) $ jmarketprices j -- case ps of (MarketPrice{mpamount=a}:_) -> Just a -- _ -> Nothing -- | Convert all this journal's amounts to cost by applying their prices, if any. journalConvertAmountsToCost :: Journal -> Journal journalConvertAmountsToCost j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts} where -- similar to journalApplyCommodityStyles fixtransaction t@Transaction{tpostings=ps} = t{tpostings=map fixposting ps} fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} fixmixedamount (Mixed as) = Mixed $ map fixamount as fixamount = canonicaliseAmount (jinferredcommodities j) . costOfAmount -- -- | Get this journal's unique, display-preference-canonicalised commodities, by symbol. -- journalCanonicalCommodities :: Journal -> M.Map String CommoditySymbol -- journalCanonicalCommodities j = canonicaliseCommodities $ journalAmountCommodities j -- -- | Get all this journal's amounts' commodities, in the order parsed. -- journalAmountCommodities :: Journal -> [CommoditySymbol] -- journalAmountCommodities = map acommodity . concatMap amounts . journalAmounts -- -- | Get all this journal's amount and price commodities, in the order parsed. -- journalAmountAndPriceCommodities :: Journal -> [CommoditySymbol] -- journalAmountAndPriceCommodities = concatMap amountCommodities . concatMap amounts . journalAmounts -- -- | Get this amount's commodity and any commodities referenced in its price. -- amountCommodities :: Amount -> [CommoditySymbol] -- amountCommodities Amount{acommodity=c,aprice=p} = -- case p of Nothing -> [c] -- Just (UnitPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- Just (TotalPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- | Get an ordered list of the amounts in this journal which will -- influence amount style canonicalisation. These are: -- -- * amounts in market price directives (in parse order) -- * amounts in postings (in parse order) -- -- Amounts in default commodity directives also influence -- canonicalisation, but earlier, as amounts are parsed. -- Amounts in posting prices are not used for canonicalisation. -- journalAmounts :: Journal -> [Amount] journalAmounts = getConst . traverseJournalAmounts (Const . 
(:[])) -- | Maps over all of the amounts in the journal overJournalAmounts :: (Amount -> Amount) -> Journal -> Journal overJournalAmounts f = runIdentity . traverseJournalAmounts (Identity . f) -- | Traverses over all ofthe amounts in the journal, in the order -- indicated by 'journalAmounts'. traverseJournalAmounts :: Applicative f => (Amount -> f Amount) -> Journal -> f Journal traverseJournalAmounts f j = recombine <$> (traverse . mpa) f (jmarketprices j) <*> (traverse . tp . traverse . pamt . maa . traverse) f (jtxns j) where recombine mps txns = j { jmarketprices = mps, jtxns = txns } -- a bunch of traversals mpa g mp = (\amt -> mp { mpamount = amt }) <$> g (mpamount mp) tp g t = (\ps -> t { tpostings = ps }) <$> g (tpostings t) pamt g p = (\amt -> p { pamount = amt }) <$> g (pamount p) maa g (Mixed as) = Mixed <$> g as -- | The fully specified date span enclosing the dates (primary or secondary) -- of all this journal's transactions and postings, or DateSpan Nothing Nothing -- if there are none. journalDateSpan :: Bool -> Journal -> DateSpan journalDateSpan secondary j | null ts = DateSpan Nothing Nothing | otherwise = DateSpan (Just earliest) (Just $ addDays 1 latest) where earliest = minimumStrict dates latest = maximumStrict dates dates = pdates ++ tdates tdates = map (if secondary then transactionDate2 else tdate) ts pdates = concatMap (catMaybes . map (if secondary then (Just . postingDate2) else pdate) . tpostings) ts ts = jtxns j -- #ifdef TESTS test_journalDateSpan = do "journalDateSpan" ~: do assertEqual "" (DateSpan (Just $ fromGregorian 2014 1 10) (Just $ fromGregorian 2014 10 11)) (journalDateSpan True j) where j = nulljournal{jtxns = [nulltransaction{tdate = parsedate "2014/02/01" ,tpostings = [posting{pdate=Just (parsedate "2014/01/10")}] } ,nulltransaction{tdate = parsedate "2014/09/01" ,tpostings = [posting{pdate2=Just (parsedate "2014/10/10")}] } ]} -- #endif -- Misc helpers -- | Check if a set of hledger account/description filter patterns matches the -- given account name or entry description. Patterns are case-insensitive -- regular expressions. Prefixed with not:, they become anti-patterns. matchpats :: [String] -> String -> Bool matchpats pats str = (null positives || any match positives) && (null negatives || not (any match negatives)) where (negatives,positives) = partition isnegativepat pats match "" = True match pat = regexMatchesCI (abspat pat) str negateprefix = "not:" isnegativepat = (negateprefix `isPrefixOf`) abspat pat = if isnegativepat pat then drop (length negateprefix) pat else pat -- debug helpers -- traceAmountPrecision a = trace (show $ map (precision . acommodity) $ amounts a) a -- tracePostingsCommodities ps = trace (show $ map ((map (precision . acommodity) . amounts) . 
pamount) ps) ps -- tests -- A sample journal for testing, similar to examples/sample.journal: -- -- 2008/01/01 income -- assets:bank:checking $1 -- income:salary -- -- 2008/06/01 gift -- assets:bank:checking $1 -- income:gifts -- -- 2008/06/02 save -- assets:bank:saving $1 -- assets:bank:checking -- -- 2008/06/03 * eat & shop -- expenses:food $1 -- expenses:supplies $1 -- assets:cash -- -- 2008/12/31 * pay off -- liabilities:debts $1 -- assets:bank:checking -- Right samplejournal = journalBalanceTransactions False $ nulljournal {jtxns = [ txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/01/01", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:salary" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/01", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="gift", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:gifts" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/02", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="save", tcomment="", ttags=[], tpostings= ["assets:bank:saving" `post` usd 1 ,"assets:bank:checking" `post` usd (-1) ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/03", tdate2=Nothing, tstatus=Cleared, tcode="", tdescription="eat & shop", tcomment="", ttags=[], tpostings=["expenses:food" `post` usd 1 ,"expenses:supplies" `post` usd 1 ,"assets:cash" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/12/31", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="pay off", tcomment="", ttags=[], tpostings=["liabilities:debts" `post` usd 1 ,"assets:bank:checking" `post` usd (-1) ], tpreceding_comment_lines="" } ] } tests_Hledger_Data_Journal = TestList $ [ test_journalDateSpan -- "query standard account types" ~: -- do -- let j = journal1 -- journalBalanceSheetAccountNames j `is` ["assets","assets:a","equity","equity:q","equity:q:qq","liabilities","liabilities:l"] -- journalProfitAndLossAccountNames j `is` ["expenses","expenses:e","income","income:i"] ] hledger-lib-1.2/Hledger/Data/Ledger.hs0000644000000000000000000000725013035210046015677 0ustar0000000000000000{-| A 'Ledger' is derived from a 'Journal' by applying a filter specification to select 'Transaction's and 'Posting's of interest. It contains the filtered journal and knows the resulting chart of accounts, account balances, and postings in each account. 
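For example (an illustration drawn from the tests at the end of this
module): @ledgerFromJournal Any samplejournal@ yields a ledger whose
'ledgerPostings' are the sample journal's eleven postings, while a
@Depth 2@ query trims that to six.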
-} module Hledger.Data.Ledger where import qualified Data.Map as M -- import Data.Text (Text) import qualified Data.Text as T import Safe (headDef) import Test.HUnit import Text.Printf import Hledger.Data.Types import Hledger.Data.Account import Hledger.Data.Journal import Hledger.Data.Posting import Hledger.Query instance Show Ledger where show l = printf "Ledger with %d transactions, %d accounts\n" --"%s" (length (jtxns $ ljournal l) + length (jmodifiertxns $ ljournal l) + length (jperiodictxns $ ljournal l)) (length $ ledgerAccountNames l) -- (showtree $ ledgerAccountNameTree l) nullledger :: Ledger nullledger = Ledger { ljournal = nulljournal, laccounts = [] } -- | Filter a journal's transactions with the given query, then derive -- a ledger containing the chart of accounts and balances. If the -- query includes a depth limit, that will affect the ledger's -- journal but not the ledger's account tree. ledgerFromJournal :: Query -> Journal -> Ledger ledgerFromJournal q j = nullledger{ljournal=j'', laccounts=as} where (q',depthq) = (filterQuery (not . queryIsDepth) q, filterQuery queryIsDepth q) j' = filterJournalAmounts (filterQuery queryIsSym q) $ -- remove amount parts which the query's sym: terms would exclude filterJournalPostings q' j as = accountsFromPostings $ journalPostings j' j'' = filterJournalPostings depthq j' -- | List a ledger's account names. ledgerAccountNames :: Ledger -> [AccountName] ledgerAccountNames = drop 1 . map aname . laccounts -- | Get the named account from a ledger. ledgerAccount :: Ledger -> AccountName -> Maybe Account ledgerAccount l a = lookupAccount a $ laccounts l -- | Get this ledger's root account, which is a dummy "root" account -- above all others. This should always be first in the account list, -- if somehow not this returns a null account. ledgerRootAccount :: Ledger -> Account ledgerRootAccount = headDef nullacct . laccounts -- | List a ledger's top-level accounts (the ones below the root), in tree order. ledgerTopAccounts :: Ledger -> [Account] ledgerTopAccounts = asubs . head . laccounts -- | List a ledger's bottom-level (subaccount-less) accounts, in tree order. ledgerLeafAccounts :: Ledger -> [Account] ledgerLeafAccounts = filter (null.asubs) . laccounts -- | Accounts in ledger whose name matches the pattern, in tree order. ledgerAccountsMatching :: [String] -> Ledger -> [Account] ledgerAccountsMatching pats = filter (matchpats pats . T.unpack . aname) . laccounts -- XXX pack -- | List a ledger's postings, in the order parsed. ledgerPostings :: Ledger -> [Posting] ledgerPostings = journalPostings . ljournal -- | The (fully specified) date span containing all the ledger's (filtered) transactions, -- or DateSpan Nothing Nothing if there are none. ledgerDateSpan :: Ledger -> DateSpan ledgerDateSpan = postingsDateSpan . ledgerPostings -- | All commodities used in this ledger. ledgerCommodities :: Ledger -> [CommoditySymbol] ledgerCommodities = M.keys . jinferredcommodities . ljournal tests_ledgerFromJournal = [ "ledgerFromJournal" ~: do assertEqual "" (0) (length $ ledgerPostings $ ledgerFromJournal Any nulljournal) assertEqual "" (11) (length $ ledgerPostings $ ledgerFromJournal Any samplejournal) assertEqual "" (6) (length $ ledgerPostings $ ledgerFromJournal (Depth 2) samplejournal) ] tests_Hledger_Data_Ledger = TestList $ tests_ledgerFromJournal hledger-lib-1.2/Hledger/Data/MarketPrice.hs0000644000000000000000000000160313066173044016711 0ustar0000000000000000{-| A 'MarketPrice' represents a historical exchange rate between two commodities. 
(Ledger calls them historical prices.) For example, prices published by a stock exchange or the foreign exchange market. Some commands (balance, currently) can use this information to show the market value of things at a given date. -} {-# LANGUAGE OverloadedStrings, LambdaCase #-} module Hledger.Data.MarketPrice where import qualified Data.Text as T import Test.HUnit import Hledger.Data.Amount import Hledger.Data.Dates import Hledger.Data.Types -- | Get the string representation of an market price, based on its -- commodity's display settings. showMarketPrice :: MarketPrice -> String showMarketPrice mp = unwords [ "P" , showDate (mpdate mp) , T.unpack (mpcommodity mp) , (showAmount . setAmountPrecision maxprecision) (mpamount mp) ] tests_Hledger_Data_MarketPrice = TestList [] hledger-lib-1.2/Hledger/Data/Period.hs0000644000000000000000000002756013035510426015732 0ustar0000000000000000{-| Manipulate the time periods typically used for reports with Period, a richer abstraction than DateSpan. See also Types and Dates. -} module Hledger.Data.Period where import Data.Time.Calendar import Data.Time.Calendar.MonthDay import Data.Time.Calendar.OrdinalDate import Data.Time.Calendar.WeekDate import Data.Time.Format import Text.Printf import Hledger.Data.Types -- | Convert Periods to DateSpans. -- -- >>> periodAsDateSpan (MonthPeriod 2000 1) == DateSpan (Just $ fromGregorian 2000 1 1) (Just $ fromGregorian 2000 2 1) -- True periodAsDateSpan :: Period -> DateSpan periodAsDateSpan (DayPeriod d) = DateSpan (Just d) (Just $ addDays 1 d) periodAsDateSpan (WeekPeriod b) = DateSpan (Just b) (Just $ addDays 7 b) periodAsDateSpan (MonthPeriod y m) = DateSpan (Just $ fromGregorian y m 1) (Just $ fromGregorian y' m' 1) where (y',m') | m==12 = (y+1,1) | otherwise = (y,m+1) periodAsDateSpan (QuarterPeriod y q) = DateSpan (Just $ fromGregorian y m 1) (Just $ fromGregorian y' m' 1) where (y', q') | q==4 = (y+1,1) | otherwise = (y,q+1) quarterAsMonth q = (q-1) * 3 + 1 m = quarterAsMonth q m' = quarterAsMonth q' periodAsDateSpan (YearPeriod y) = DateSpan (Just $ fromGregorian y 1 1) (Just $ fromGregorian (y+1) 1 1) periodAsDateSpan (PeriodBetween b e) = DateSpan (Just b) (Just e) periodAsDateSpan (PeriodFrom b) = DateSpan (Just b) Nothing periodAsDateSpan (PeriodTo e) = DateSpan Nothing (Just e) periodAsDateSpan (PeriodAll) = DateSpan Nothing Nothing -- | Convert DateSpans to Periods. -- -- >>> dateSpanAsPeriod $ DateSpan (Just $ fromGregorian 2000 1 1) (Just $ fromGregorian 2000 2 1) -- MonthPeriod 2000 1 dateSpanAsPeriod :: DateSpan -> Period dateSpanAsPeriod (DateSpan (Just b) (Just e)) = simplifyPeriod $ PeriodBetween b e dateSpanAsPeriod (DateSpan (Just b) Nothing) = PeriodFrom b dateSpanAsPeriod (DateSpan Nothing (Just e)) = PeriodTo e dateSpanAsPeriod (DateSpan Nothing Nothing) = PeriodAll -- | Convert PeriodBetweens to a more abstract period where possible. 
-- -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 1 1 1) (fromGregorian 2 1 1) -- YearPeriod 1 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 10 1) (fromGregorian 2001 1 1) -- QuarterPeriod 2000 4 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 1) (fromGregorian 2000 3 1) -- MonthPeriod 2000 2 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2016 7 25) (fromGregorian 2016 8 1) -- WeekPeriod 2016-07-25 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 1 1) (fromGregorian 2000 1 2) -- DayPeriod 2000-01-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 28) (fromGregorian 2000 3 1) -- PeriodBetween 2000-02-28 2000-03-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 29) (fromGregorian 2000 3 1) -- DayPeriod 2000-02-29 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 12 31) (fromGregorian 2001 1 1) -- DayPeriod 2000-12-31 -- simplifyPeriod :: Period -> Period simplifyPeriod (PeriodBetween b e) = case (toGregorian b, toGregorian e) of -- a year ((by,1,1), (ey,1,1)) | by+1==ey -> YearPeriod by -- a half-year -- ((by,1,1), (ey,7,1)) | by==ey -> -- ((by,7,1), (ey,1,1)) | by+1==ey -> -- a quarter ((by,1,1), (ey,4,1)) | by==ey -> QuarterPeriod by 1 ((by,4,1), (ey,7,1)) | by==ey -> QuarterPeriod by 2 ((by,7,1), (ey,10,1)) | by==ey -> QuarterPeriod by 3 ((by,10,1), (ey,1,1)) | by+1==ey -> QuarterPeriod by 4 -- a month ((by,bm,1), (ey,em,1)) | by==ey && bm+1==em -> MonthPeriod by bm ((by,12,1), (ey,1,1)) | by+1==ey -> MonthPeriod by 12 -- a week (two successive mondays), -- YYYYwN ("week N of year YYYY") -- _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate from, toWeekDate to) in by==ey && fw+1==tw && bd==1 && ed==1 -> -- a week starting on a monday _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate b, toWeekDate (addDays (-1) e)) in by==ey && bw==ew && bd==1 && ed==7 -> WeekPeriod b -- a day ((by,bm,bd), (ey,em,ed)) | (by==ey && bm==em && bd+1==ed) || (by+1==ey && bm==12 && em==1 && bd==31 && ed==1) || -- crossing a year boundary (by==ey && bm+1==em && isLastDayOfMonth by bm bd && ed==1) -- crossing a month boundary -> DayPeriod b _ -> PeriodBetween b e simplifyPeriod p = p isLastDayOfMonth y m d = case m of 1 -> d==31 2 | isLeapYear y -> d==29 | otherwise -> d==28 3 -> d==31 4 -> d==30 5 -> d==31 6 -> d==30 7 -> d==31 8 -> d==31 9 -> d==30 10 -> d==31 11 -> d==30 12 -> d==31 _ -> False -- | Is this period a "standard" period, referencing a particular day, week, month, quarter, or year ? -- Periods of other durations, or infinite duration, or not starting on a standard period boundary, are not. isStandardPeriod = isStandardPeriod' . simplifyPeriod where isStandardPeriod' (DayPeriod _) = True isStandardPeriod' (WeekPeriod _) = True isStandardPeriod' (MonthPeriod _ _) = True isStandardPeriod' (QuarterPeriod _ _) = True isStandardPeriod' (YearPeriod _) = True isStandardPeriod' _ = False -- | Render a period as a compact display string suitable for user output. 
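-- Examples for the other period types (added for illustration, derived
-- from the format strings below):
--
-- >>> showPeriod (MonthPeriod 2016 7)
-- "2016/07"
-- >>> showPeriod (QuarterPeriod 2016 3)
-- "2016q3"
-- >>> showPeriod (YearPeriod 2016)
-- "2016"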
-- -- >>> showPeriod (WeekPeriod (fromGregorian 2016 7 25)) -- "2016/07/25w30" showPeriod (DayPeriod b) = formatTime defaultTimeLocale "%0C%y/%m/%d" b -- DATE showPeriod (WeekPeriod b) = formatTime defaultTimeLocale "%0C%y/%m/%dw%V" b -- STARTDATEwYEARWEEK showPeriod (MonthPeriod y m) = printf "%04d/%02d" y m -- YYYY/MM showPeriod (QuarterPeriod y q) = printf "%04dq%d" y q -- YYYYqN showPeriod (YearPeriod y) = printf "%04d" y -- YYYY showPeriod (PeriodBetween b e) = formatTime defaultTimeLocale "%0C%y/%m/%d" b ++ formatTime defaultTimeLocale "-%0C%y/%m/%d" (addDays (-1) e) -- STARTDATE-INCLUSIVEENDDATE showPeriod (PeriodFrom b) = formatTime defaultTimeLocale "%0C%y/%m/%d-" b -- STARTDATE- showPeriod (PeriodTo e) = formatTime defaultTimeLocale "-%0C%y/%m/%d" (addDays (-1) e) -- -INCLUSIVEENDDATE showPeriod PeriodAll = "-" periodStart :: Period -> Maybe Day periodStart p = mb where DateSpan mb _ = periodAsDateSpan p periodEnd :: Period -> Maybe Day periodEnd p = me where DateSpan _ me = periodAsDateSpan p -- | Move a standard period to the following period of same duration. -- Non-standard periods are unaffected. periodNext :: Period -> Period periodNext (DayPeriod b) = DayPeriod (addDays 1 b) periodNext (WeekPeriod b) = WeekPeriod (addDays 7 b) periodNext (MonthPeriod y 12) = MonthPeriod (y+1) 1 periodNext (MonthPeriod y m) = MonthPeriod y (m+1) periodNext (QuarterPeriod y 4) = QuarterPeriod (y+1) 1 periodNext (QuarterPeriod y q) = QuarterPeriod y (q+1) periodNext (YearPeriod y) = YearPeriod (y+1) periodNext p = p -- | Move a standard period to the preceding period of same duration. -- Non-standard periods are unaffected. periodPrevious :: Period -> Period periodPrevious (DayPeriod b) = DayPeriod (addDays (-1) b) periodPrevious (WeekPeriod b) = WeekPeriod (addDays (-7) b) periodPrevious (MonthPeriod y 1) = MonthPeriod (y-1) 12 periodPrevious (MonthPeriod y m) = MonthPeriod y (m-1) periodPrevious (QuarterPeriod y 1) = QuarterPeriod (y-1) 4 periodPrevious (QuarterPeriod y q) = QuarterPeriod y (q-1) periodPrevious (YearPeriod y) = YearPeriod (y-1) periodPrevious p = p -- | Move a standard period to the following period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodNextIn :: DateSpan -> Period -> Period periodNextIn (DateSpan _ (Just e)) p = case mb of Just b -> if b < e then p' else p _ -> p where p' = periodNext p mb = periodStart p' periodNextIn _ p = periodNext p -- | Move a standard period to the preceding period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodPreviousIn :: DateSpan -> Period -> Period periodPreviousIn (DateSpan (Just b) _) p = case me of Just e -> if e > b then p' else p _ -> p where p' = periodPrevious p me = periodEnd p' periodPreviousIn _ p = periodPrevious p -- | Move a standard period stepwise so that it encloses the given date. -- Non-standard periods are unaffected. periodMoveTo :: Day -> Period -> Period periodMoveTo d (DayPeriod _) = DayPeriod d periodMoveTo d (WeekPeriod _) = WeekPeriod $ mondayBefore d periodMoveTo d (MonthPeriod _ _) = MonthPeriod y m where (y,m,_) = toGregorian d periodMoveTo d (QuarterPeriod _ _) = QuarterPeriod y q where (y,m,_) = toGregorian d q = quarterContainingMonth m periodMoveTo d (YearPeriod _) = YearPeriod y where (y,_,_) = toGregorian d periodMoveTo _ p = p -- | Enlarge a standard period to the next larger enclosing standard period, if there is one. -- Eg, a day becomes the enclosing week. 
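-- An illustrative doctest for that first case:
--
-- >>> periodGrow (DayPeriod (fromGregorian 2016 7 29))
-- WeekPeriod 2016-07-25
--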
-- A week becomes whichever month the week's thursday falls into. -- A year becomes all (unlimited). -- Non-standard periods (arbitrary dates, or open-ended) are unaffected. periodGrow :: Period -> Period periodGrow (DayPeriod b) = WeekPeriod $ mondayBefore b periodGrow (WeekPeriod b) = MonthPeriod y m where (y,m) = yearMonthContainingWeekStarting b periodGrow (MonthPeriod y m) = QuarterPeriod y (quarterContainingMonth m) periodGrow (QuarterPeriod y _) = YearPeriod y periodGrow (YearPeriod _) = PeriodAll periodGrow p = p -- | Shrink a period to the next smaller standard period inside it, -- choosing the subperiod which contains today's date if possible, -- otherwise the first subperiod. It goes like this: -- unbounded periods and nonstandard periods (between two arbitrary dates) -> -- current year -> -- current quarter if it's in selected year, otherwise first quarter of selected year -> -- current month if it's in selected quarter, otherwise first month of selected quarter -> -- current week if it's in selected month, otherwise first week of selected month -> -- today if it's in selected week, otherwise first day of selected week, -- unless that's in previous month, in which case first day of month containing selected week. -- Shrinking a day has no effect. periodShrink :: Day -> Period -> Period periodShrink _ p@(DayPeriod _) = p periodShrink today (WeekPeriod b) | today >= b && diffDays today b < 7 = DayPeriod today | m /= weekmonth = DayPeriod $ fromGregorian weekyear weekmonth 1 | otherwise = DayPeriod b where (_,m,_) = toGregorian b (weekyear,weekmonth) = yearMonthContainingWeekStarting b periodShrink today (MonthPeriod y m) | (y',m') == (y,m) = WeekPeriod $ mondayBefore today | otherwise = WeekPeriod $ startOfFirstWeekInMonth y m where (y',m',_) = toGregorian today periodShrink today (QuarterPeriod y q) | quarterContainingMonth thismonth == q = MonthPeriod y thismonth | otherwise = MonthPeriod y (firstMonthOfQuarter q) where (_,thismonth,_) = toGregorian today periodShrink today (YearPeriod y) | y == thisyear = QuarterPeriod y thisquarter | otherwise = QuarterPeriod y 1 where (thisyear,thismonth,_) = toGregorian today thisquarter = quarterContainingMonth thismonth periodShrink today _ = YearPeriod y where (y,_,_) = toGregorian today mondayBefore d = addDays (fromIntegral (1 - wd)) d where (_,_,wd) = toWeekDate d yearMonthContainingWeekStarting weekstart = (y,m) where thu = addDays 3 weekstart (y,yd) = toOrdinalDate thu (m,_) = dayOfYearToMonthAndDay (isLeapYear y) yd quarterContainingMonth m = (m-1) `div` 3 + 1 firstMonthOfQuarter q = (q-1)*3 + 1 startOfFirstWeekInMonth y m | monthstartday <= 4 = mon | otherwise = addDays 7 mon -- month starts with a fri/sat/sun where monthstart = fromGregorian y m 1 mon = mondayBefore monthstart (_,_,monthstartday) = toWeekDate monthstart hledger-lib-1.2/Hledger/Data/Posting.hs0000644000000000000000000003000113066173044016120 0ustar0000000000000000{-| A 'Posting' represents a change (by some 'MixedAmount') of the balance in some 'Account'. Each 'Transaction' contains two or more postings which should add up to 0. Postings reference their parent transaction, so we can look up the date or description there. 
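
For example, here is an illustrative journal entry (in the same style as this
package's tests) whose two postings sum to zero:

@
2017/01/01 grocery store
    expenses:food:groceries        $47.18
    assets:checking               $-47.18
@
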
-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Posting ( -- * Posting nullposting, posting, post, -- * operations originalPosting, postingStatus, isReal, isVirtual, isBalancedVirtual, isEmptyPosting, isAssignment, hasAmount, postingAllTags, transactionAllTags, postingAllImplicitTags, relatedPostings, removePrices, -- * date operations postingDate, postingDate2, isPostingInDateSpan, isPostingInDateSpan', postingsDateSpan, postingsDateSpan', -- * account name operations accountNamesFromPostings, accountNamePostingType, accountNameWithoutPostingType, accountNameWithPostingType, joinAccountNames, concatAccountNames, accountNameApplyAliases, accountNameApplyAliasesMemo, -- * transaction description operations transactionPayee, transactionNote, payeeAndNoteFromDescription, -- * arithmetic sumPostings, -- * rendering showPosting, -- * misc. showComment, tests_Hledger_Data_Posting ) where import Data.List import Data.Maybe import Data.MemoUgly (memo) import Data.Monoid import Data.Ord import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe import Test.HUnit import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Amount import Hledger.Data.AccountName import Hledger.Data.Dates (nulldate, spanContainsDate) instance Show Posting where show = showPosting nullposting, posting :: Posting nullposting = Posting {pdate=Nothing ,pdate2=Nothing ,pstatus=Uncleared ,paccount="" ,pamount=nullmixedamt ,pcomment="" ,ptype=RegularPosting ,ptags=[] ,pbalanceassertion=Nothing ,ptransaction=Nothing ,porigin=Nothing } posting = nullposting post :: AccountName -> Amount -> Posting post acct amt = posting {paccount=acct, pamount=Mixed [amt]} originalPosting :: Posting -> Posting originalPosting p = fromMaybe p $ porigin p -- XXX once rendered user output, but just for debugging now; clean up showPosting :: Posting -> String showPosting p@Posting{paccount=a,pamount=amt,ptype=t} = unlines $ [concatTopPadded [show (postingDate p) ++ " ", showaccountname a ++ " ", showamount amt, showComment (pcomment p)]] where ledger3ishlayout = False acctnamewidth = if ledger3ishlayout then 25 else 22 showaccountname = fitString (Just acctnamewidth) Nothing False False . bracket . T.unpack . elideAccountName width (bracket,width) = case t of BalancedVirtualPosting -> (\s -> "["++s++"]", acctnamewidth-2) VirtualPosting -> (\s -> "("++s++")", acctnamewidth-2) _ -> (id,acctnamewidth) showamount = padLeftWide 12 . showMixedAmount showComment :: Text -> String showComment t = if T.null t then "" else " ;" ++ T.unpack t isReal :: Posting -> Bool isReal p = ptype p == RegularPosting isVirtual :: Posting -> Bool isVirtual p = ptype p == VirtualPosting isBalancedVirtual :: Posting -> Bool isBalancedVirtual p = ptype p == BalancedVirtualPosting hasAmount :: Posting -> Bool hasAmount = (/= missingmixedamt) . pamount isAssignment :: Posting -> Bool isAssignment p = not (hasAmount p) && isJust (pbalanceassertion p) accountNamesFromPostings :: [Posting] -> [AccountName] accountNamesFromPostings = nub . map paccount sumPostings :: [Posting] -> MixedAmount sumPostings = sumStrict . map pamount -- | Remove all prices of a posting removePrices :: Posting -> Posting removePrices p = p{ pamount = Mixed $ remove <$> amounts (pamount p) } where remove a = a { aprice = NoPrice } -- | Get a posting's (primary) date - it's own primary date if specified, -- otherwise the parent transaction's primary date, or the null date if -- there is no parent transaction. 
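--
-- Illustrative examples (assuming fromGregorian and this module's other
-- imports are visible in the doctest session; these are not part of the
-- original test suite):
--
-- >>> postingDate nullposting
-- 0000-01-01
-- >>> postingDate nullposting{pdate=Just (fromGregorian 2017 1 1)}
-- 2017-01-01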
postingDate :: Posting -> Day
postingDate p = fromMaybe txndate $ pdate p
    where
      txndate = maybe nulldate tdate $ ptransaction p

-- | Get a posting's secondary date, which is the first of:
-- posting's secondary date, transaction's secondary date, posting's
-- primary date, transaction's primary date, or the null date if there is
-- no parent transaction.
postingDate2 :: Posting -> Day
postingDate2 p = headDef nulldate $ catMaybes dates
  where dates = [pdate2 p
                ,maybe Nothing tdate2 $ ptransaction p
                ,pdate p
                ,maybe Nothing (Just . tdate) $ ptransaction p
                ]

-- | Get a posting's cleared status: cleared or pending if those are
-- explicitly set, otherwise the cleared status of its parent
-- transaction, or uncleared if there is no parent transaction. (Note
-- Uncleared's ambiguity, it can mean "uncleared" or "don't know".)
postingStatus :: Posting -> ClearedStatus
postingStatus Posting{pstatus=s, ptransaction=mt}
  | s == Uncleared = case mt of
                       Just t  -> tstatus t
                       Nothing -> Uncleared
  | otherwise = s

-- | Implicit tags for this transaction.
transactionImplicitTags :: Transaction -> [Tag]
transactionImplicitTags t = filter (not . T.null . snd)
  [("code", tcode t)
  ,("description", tdescription t)
  ,("payee", transactionPayee t)
  ,("note", transactionNote t)
  ]

transactionPayee :: Transaction -> Text
transactionPayee = fst . payeeAndNoteFromDescription . tdescription

transactionNote :: Transaction -> Text
transactionNote = snd . payeeAndNoteFromDescription . tdescription

-- | Parse a transaction's description into payee and note (aka narration) fields,
-- assuming a convention of separating these with | (like Beancount).
-- Ie, everything up to the first | is the payee, everything after it is the note.
-- When there's no |, payee == note == description.
payeeAndNoteFromDescription :: Text -> (Text,Text)
payeeAndNoteFromDescription t
  | T.null n  = (t, t)
  | otherwise = (textstrip p, textstrip $ T.tail n)
  where (p,n) = T.breakOn "|" t

-- | Tags for this posting including implicit and any inherited from its parent transaction.
postingAllImplicitTags :: Posting -> [Tag]
postingAllImplicitTags p = ptags p ++ maybe [] transactionTags (ptransaction p)
  where transactionTags t = ttags t ++ transactionImplicitTags t

-- | Tags for this posting including any inherited from its parent transaction.
postingAllTags :: Posting -> [Tag]
postingAllTags p = ptags p ++ maybe [] ttags (ptransaction p)

-- | Tags for this transaction including any from its postings.
transactionAllTags :: Transaction -> [Tag]
transactionAllTags t = ttags t ++ concatMap ptags (tpostings t)

-- Get the other postings from this posting's transaction.
relatedPostings :: Posting -> [Posting]
relatedPostings p@Posting{ptransaction=Just t} = filter (/= p) $ tpostings t
relatedPostings _ = []

-- | Does this posting fall within the given date span ?
isPostingInDateSpan :: DateSpan -> Posting -> Bool
isPostingInDateSpan s = spanContainsDate s . postingDate

-- --date2-sensitive version, separate for now to avoid disturbing multiBalanceReport.
isPostingInDateSpan' :: WhichDate -> DateSpan -> Posting -> Bool
isPostingInDateSpan' PrimaryDate   s = spanContainsDate s . postingDate
isPostingInDateSpan' SecondaryDate s = spanContainsDate s . postingDate2

isEmptyPosting :: Posting -> Bool
isEmptyPosting = isZeroMixedAmount . pamount

-- | Get the minimal date span which contains all the postings, or the
-- null date span if there are none.
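--
-- An illustrative example (compared with (==) rather than shown, since
-- DateSpan's rendering is defined elsewhere):
--
-- >>> postingsDateSpan [] == DateSpan Nothing Nothing
-- True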
postingsDateSpan :: [Posting] -> DateSpan postingsDateSpan [] = DateSpan Nothing Nothing postingsDateSpan ps = DateSpan (Just $ postingDate $ head ps') (Just $ addDays 1 $ postingDate $ last ps') where ps' = sortBy (comparing postingDate) ps -- --date2-sensitive version, as above. postingsDateSpan' :: WhichDate -> [Posting] -> DateSpan postingsDateSpan' _ [] = DateSpan Nothing Nothing postingsDateSpan' wd ps = DateSpan (Just $ postingdate $ head ps') (Just $ addDays 1 $ postingdate $ last ps') where ps' = sortBy (comparing postingdate) ps postingdate = if wd == PrimaryDate then postingDate else postingDate2 -- AccountName stuff that depends on PostingType accountNamePostingType :: AccountName -> PostingType accountNamePostingType a | T.null a = RegularPosting | T.head a == '[' && T.last a == ']' = BalancedVirtualPosting | T.head a == '(' && T.last a == ')' = VirtualPosting | otherwise = RegularPosting accountNameWithoutPostingType :: AccountName -> AccountName accountNameWithoutPostingType a = case accountNamePostingType a of BalancedVirtualPosting -> T.init $ T.tail a VirtualPosting -> T.init $ T.tail a RegularPosting -> a accountNameWithPostingType :: PostingType -> AccountName -> AccountName accountNameWithPostingType BalancedVirtualPosting a = "["<>accountNameWithoutPostingType a<>"]" accountNameWithPostingType VirtualPosting a = "("<>accountNameWithoutPostingType a<>")" accountNameWithPostingType RegularPosting a = accountNameWithoutPostingType a -- | Prefix one account name to another, preserving posting type -- indicators like concatAccountNames. joinAccountNames :: AccountName -> AccountName -> AccountName joinAccountNames a b = concatAccountNames $ filter (not . T.null) [a,b] -- | Join account names into one. If any of them has () or [] posting type -- indicators, these (the first type encountered) will also be applied to -- the resulting account name. concatAccountNames :: [AccountName] -> AccountName concatAccountNames as = accountNameWithPostingType t $ T.intercalate ":" $ map accountNameWithoutPostingType as where t = headDef RegularPosting $ filter (/= RegularPosting) $ map accountNamePostingType as -- | Rewrite an account name using all matching aliases from the given list, in sequence. -- Each alias sees the result of applying the previous aliases. accountNameApplyAliases :: [AccountAlias] -> AccountName -> AccountName accountNameApplyAliases aliases a = accountNameWithPostingType atype aname' where (aname,atype) = (accountNameWithoutPostingType a, accountNamePostingType a) aname' = foldl (\acct alias -> dbg6 "result" $ aliasReplace (dbg6 "alias" alias) (dbg6 "account" acct)) aname aliases -- | Memoising version of accountNameApplyAliases, maybe overkill. 
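--
-- An illustrative example (assuming OverloadedStrings in the doctest session;
-- it behaves just like accountNameApplyAliases above):
--
-- >>> accountNameApplyAliasesMemo [BasicAlias "assets" "a"] "assets:cash"
-- "a:cash"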
accountNameApplyAliasesMemo :: [AccountAlias] -> AccountName -> AccountName accountNameApplyAliasesMemo aliases = memo (accountNameApplyAliases aliases) -- aliasMatches :: AccountAlias -> AccountName -> Bool -- aliasMatches (BasicAlias old _) a = old `isAccountNamePrefixOf` a -- aliasMatches (RegexAlias re _) a = regexMatchesCI re a aliasReplace :: AccountAlias -> AccountName -> AccountName aliasReplace (BasicAlias old new) a | old `isAccountNamePrefixOf` a || old == a = new <> T.drop (T.length old) a | otherwise = a aliasReplace (RegexAlias re repl) a = T.pack $ regexReplaceCIMemo re repl $ T.unpack a -- XXX tests_Hledger_Data_Posting = TestList [ "accountNamePostingType" ~: do accountNamePostingType "a" `is` RegularPosting accountNamePostingType "(a)" `is` VirtualPosting accountNamePostingType "[a]" `is` BalancedVirtualPosting ,"accountNameWithoutPostingType" ~: do accountNameWithoutPostingType "(a)" `is` "a" ,"accountNameWithPostingType" ~: do accountNameWithPostingType VirtualPosting "[a]" `is` "(a)" ,"joinAccountNames" ~: do "a" `joinAccountNames` "b:c" `is` "a:b:c" "a" `joinAccountNames` "(b:c)" `is` "(a:b:c)" "[a]" `joinAccountNames` "(b:c)" `is` "[a:b:c]" "" `joinAccountNames` "a" `is` "a" ,"concatAccountNames" ~: do concatAccountNames [] `is` "" concatAccountNames ["a","(b)","[c:d]"] `is` "(a:b:c:d)" ] hledger-lib-1.2/Hledger/Data/RawOptions.hs0000644000000000000000000000324613066746043016622 0ustar0000000000000000{-| hledger's cmdargs modes parse command-line arguments to an intermediate format, RawOpts (an association list), rather than a fixed ADT like CliOpts. This allows the modes and flags to be reused more easily by hledger commands/scripts in this and other packages. -} module Hledger.Data.RawOptions ( RawOpts, setopt, setboolopt, inRawOpts, boolopt, stringopt, maybestringopt, listofstringopt, intopt, maybeintopt ) where import Data.Maybe import qualified Data.Text as T import Safe import Hledger.Utils -- | The result of running cmdargs: an association list of option names to string values. type RawOpts = [(String,String)] setopt :: String -> String -> RawOpts -> RawOpts setopt name val = (++ [(name, quoteIfNeeded $ val)]) setboolopt :: String -> RawOpts -> RawOpts setboolopt name = (++ [(name,"")]) -- | Is the named option present ? inRawOpts :: String -> RawOpts -> Bool inRawOpts name = isJust . lookup name boolopt :: String -> RawOpts -> Bool boolopt = inRawOpts maybestringopt :: String -> RawOpts -> Maybe String maybestringopt name = maybe Nothing (Just . T.unpack . stripquotes . T.pack) . lookup name . reverse stringopt :: String -> RawOpts -> String stringopt name = fromMaybe "" . maybestringopt name listofstringopt :: String -> RawOpts -> [String] listofstringopt name rawopts = [v | (k,v) <- rawopts, k==name] maybeintopt :: String -> RawOpts -> Maybe Int maybeintopt name rawopts = let ms = maybestringopt name rawopts in case ms of Nothing -> Nothing Just s -> Just $ readDef (usageError $ "could not parse "++name++" number: "++s) s intopt :: String -> RawOpts -> Int intopt name = fromMaybe 0 . maybeintopt name hledger-lib-1.2/Hledger/Data/StringFormat.hs0000644000000000000000000002131313035210046017110 0ustar0000000000000000-- | Parse format strings provided by --format, with awareness of -- hledger's report item fields. The formats are used by -- report-specific renderers like renderBalanceReportItem. {-# LANGUAGE FlexibleContexts, TypeFamilies #-} module Hledger.Data.StringFormat ( parseStringFormat , defaultStringFormatStyle , StringFormat(..) 
, StringFormatComponent(..) , ReportItemField(..) , tests ) where import Prelude () import Prelude.Compat import Numeric import Data.Char (isPrint) import Data.Maybe import Test.HUnit import Text.Megaparsec import Text.Megaparsec.String import Hledger.Utils.String (formatString) -- | A format specification/template to use when rendering a report line item as text. -- -- A format is a sequence of components; each is either a literal -- string, or a hledger report item field with specified width and -- justification whose value will be interpolated at render time. -- -- A component's value may be a multi-line string (or a -- multi-commodity amount), in which case the final string will be -- either single-line or a top or bottom-aligned multi-line string -- depending on the StringFormat variant used. -- -- Currently this is only used in the balance command's single-column -- mode, which provides a limited StringFormat renderer. -- data StringFormat = OneLine [StringFormatComponent] -- ^ multi-line values will be rendered on one line, comma-separated | TopAligned [StringFormatComponent] -- ^ values will be top-aligned (and bottom-padded to the same height) | BottomAligned [StringFormatComponent] -- ^ values will be bottom-aligned (and top-padded) deriving (Show, Eq) data StringFormatComponent = FormatLiteral String -- ^ Literal text to be rendered as-is | FormatField Bool (Maybe Int) (Maybe Int) ReportItemField -- ^ A data field to be formatted and interpolated. Parameters: -- -- - Left justify ? Right justified if false -- - Minimum width ? Will be space-padded if narrower than this -- - Maximum width ? Will be clipped if wider than this -- - Which of the standard hledger report item fields to interpolate deriving (Show, Eq) -- | An id identifying which report item field to interpolate. These -- are drawn from several hledger report types, so are not all -- applicable for a given report. data ReportItemField = AccountField -- ^ A posting or balance report item's account name | DefaultDateField -- ^ A posting or register or entry report item's date | DescriptionField -- ^ A posting or register or entry report item's description | TotalField -- ^ A balance or posting report item's balance or running total. -- Always rendered right-justified. | DepthSpacerField -- ^ A balance report item's indent level (which may be different from the account name depth). -- Rendered as this number of spaces, multiplied by the minimum width spec if any. | FieldNo Int -- ^ A report item's nth field. May be unimplemented. deriving (Show, Eq) ---------------------------------------------------------------------- -- renderStringFormat :: StringFormat -> Map String String -> String -- renderStringFormat fmt params = ---------------------------------------------------------------------- -- | Parse a string format specification, or return a parse error. 
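--
-- A couple of illustrative examples, relying on the derived Show instances
-- (see also the tests at the end of this module):
--
-- >>> parseStringFormat "%-(account)"
-- Right (BottomAligned [FormatField True Nothing Nothing AccountField])
-- >>> parseStringFormat "%.10(total)"
-- Right (BottomAligned [FormatField False Nothing (Just 10) TotalField])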
parseStringFormat :: String -> Either String StringFormat parseStringFormat input = case (runParser (stringformatp <* eof) "(unknown)") input of Left y -> Left $ show y Right x -> Right x defaultStringFormatStyle = BottomAligned stringformatp :: Parser StringFormat stringformatp = do alignspec <- optional (try $ char '%' >> oneOf "^_,") let constructor = case alignspec of Just '^' -> TopAligned Just '_' -> BottomAligned Just ',' -> OneLine _ -> defaultStringFormatStyle constructor <$> many componentp componentp :: Parser StringFormatComponent componentp = formatliteralp <|> formatfieldp formatliteralp :: Parser StringFormatComponent formatliteralp = do s <- some c return $ FormatLiteral s where isPrintableButNotPercentage x = isPrint x && (not $ x == '%') c = (satisfy isPrintableButNotPercentage "printable character") <|> try (string "%%" >> return '%') formatfieldp :: Parser StringFormatComponent formatfieldp = do char '%' leftJustified <- optional (char '-') minWidth <- optional (some $ digitChar) maxWidth <- optional (do char '.'; some $ digitChar) -- TODO: Can this be (char '1') *> (some digitChar) char '(' f <- fieldp char ')' return $ FormatField (isJust leftJustified) (parseDec minWidth) (parseDec maxWidth) f where parseDec s = case s of Just text -> Just m where ((m,_):_) = readDec text _ -> Nothing fieldp :: Parser ReportItemField fieldp = do try (string "account" >> return AccountField) <|> try (string "depth_spacer" >> return DepthSpacerField) <|> try (string "date" >> return DescriptionField) <|> try (string "description" >> return DescriptionField) <|> try (string "total" >> return TotalField) <|> try (some digitChar >>= (\s -> return $ FieldNo $ read s)) ---------------------------------------------------------------------- testFormat :: StringFormatComponent -> String -> String -> Assertion testFormat fs value expected = assertEqual name expected actual where (name, actual) = case fs of FormatLiteral l -> ("literal", formatString False Nothing Nothing l) FormatField leftJustify min max _ -> ("field", formatString leftJustify min max value) testParser :: String -> StringFormat -> Assertion testParser s expected = case (parseStringFormat s) of Left error -> assertFailure $ show error Right actual -> assertEqual ("Input: " ++ s) expected actual tests = test [ formattingTests ++ parserTests ] formattingTests = [ testFormat (FormatLiteral " ") "" " " , testFormat (FormatField False Nothing Nothing DescriptionField) "description" "description" , testFormat (FormatField False (Just 20) Nothing DescriptionField) "description" " description" , testFormat (FormatField False Nothing (Just 20) DescriptionField) "description" "description" , testFormat (FormatField True Nothing (Just 20) DescriptionField) "description" "description" , testFormat (FormatField True (Just 20) Nothing DescriptionField) "description" "description " , testFormat (FormatField True (Just 20) (Just 20) DescriptionField) "description" "description " , testFormat (FormatField True Nothing (Just 3) DescriptionField) "description" "des" ] parserTests = [ testParser "" (defaultStringFormatStyle []) , testParser "D" (defaultStringFormatStyle [FormatLiteral "D"]) , testParser "%(date)" (defaultStringFormatStyle [FormatField False Nothing Nothing DescriptionField]) , testParser "%(total)" (defaultStringFormatStyle [FormatField False Nothing Nothing TotalField]) , testParser "^%(total)" (TopAligned [FormatField False Nothing Nothing TotalField]) , testParser "_%(total)" (BottomAligned [FormatField False Nothing Nothing 
TotalField]) , testParser ",%(total)" (OneLine [FormatField False Nothing Nothing TotalField]) , testParser "Hello %(date)!" (defaultStringFormatStyle [FormatLiteral "Hello ", FormatField False Nothing Nothing DescriptionField, FormatLiteral "!"]) , testParser "%-(date)" (defaultStringFormatStyle [FormatField True Nothing Nothing DescriptionField]) , testParser "%20(date)" (defaultStringFormatStyle [FormatField False (Just 20) Nothing DescriptionField]) , testParser "%.10(date)" (defaultStringFormatStyle [FormatField False Nothing (Just 10) DescriptionField]) , testParser "%20.10(date)" (defaultStringFormatStyle [FormatField False (Just 20) (Just 10) DescriptionField]) , testParser "%20(account) %.10(total)\n" (defaultStringFormatStyle [FormatField False (Just 20) Nothing AccountField , FormatLiteral " " , FormatField False Nothing (Just 10) TotalField , FormatLiteral "\n" ]) ] hledger-lib-1.2/Hledger/Data/Timeclock.hs0000644000000000000000000001350213035210046016404 0ustar0000000000000000{-| A 'TimeclockEntry' is a clock-in, clock-out, or other directive in a timeclock file (see timeclock.el or the command-line version). These can be converted to 'Transactions' and queried like a ledger. -} {-# LANGUAGE CPP, OverloadedStrings #-} module Hledger.Data.Timeclock where import Data.Maybe -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.Clock import Data.Time.Format import Data.Time.LocalTime #if !(MIN_VERSION_time(1,5,0)) import System.Locale (defaultTimeLocale) #endif import Test.HUnit import Text.Printf import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Posting import Hledger.Data.Transaction instance Show TimeclockEntry where show t = printf "%s %s %s %s" (show $ tlcode t) (show $ tldatetime t) (tlaccount t) (tldescription t) instance Show TimeclockCode where show SetBalance = "b" show SetRequiredHours = "h" show In = "i" show Out = "o" show FinalOut = "O" instance Read TimeclockCode where readsPrec _ ('b' : xs) = [(SetBalance, xs)] readsPrec _ ('h' : xs) = [(SetRequiredHours, xs)] readsPrec _ ('i' : xs) = [(In, xs)] readsPrec _ ('o' : xs) = [(Out, xs)] readsPrec _ ('O' : xs) = [(FinalOut, xs)] readsPrec _ _ = [] -- | Convert time log entries to journal transactions. When there is no -- clockout, add one with the provided current time. Sessions crossing -- midnight are split into days to give accurate per-day totals. 
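--
-- For example (illustrative, not a doctest), a clock-in/clock-out pair like
--
-- @
-- i 2017/04/01 09:00:00 projects:a
-- o 2017/04/01 12:30:00
-- @
--
-- becomes a single transaction dated 2017/04/01, described "09:00-12:30",
-- with one 3.50h virtual posting to projects:a (see entryFromTimeclockInOut below).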
timeclockEntriesToTransactions :: LocalTime -> [TimeclockEntry] -> [Transaction] timeclockEntriesToTransactions _ [] = [] timeclockEntriesToTransactions now [i] | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now [i',o] | otherwise = [entryFromTimeclockInOut i o] where o = TimeclockEntry (tlsourcepos i) Out end "" "" end = if itime > now then itime else now (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} timeclockEntriesToTransactions now (i:o:rest) | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now (i':o:rest) | otherwise = entryFromTimeclockInOut i o : timeclockEntriesToTransactions now rest where (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} -- | Convert a timeclock clockin and clockout entry to an equivalent journal -- transaction, representing the time expenditure. Note this entry is not balanced, -- since we omit the \"assets:time\" transaction for simpler output. entryFromTimeclockInOut :: TimeclockEntry -> TimeclockEntry -> Transaction entryFromTimeclockInOut i o | otime >= itime = t | otherwise = error' $ "clock-out time less than clock-in time in:\n" ++ showTransaction t where t = Transaction { tindex = 0, tsourcepos = tlsourcepos i, tdate = idate, tdate2 = Nothing, tstatus = Cleared, tcode = "", tdescription = desc, tcomment = "", ttags = [], tpostings = ps, tpreceding_comment_lines="" } itime = tldatetime i otime = tldatetime o itod = localTimeOfDay itime otod = localTimeOfDay otime idate = localDay itime desc | T.null (tldescription i) = T.pack $ showtime itod ++ "-" ++ showtime otod | otherwise = tldescription i showtime = take 5 . show hours = elapsedSeconds (toutc otime) (toutc itime) / 3600 where toutc = localTimeToUTC utc acctname = tlaccount i amount = Mixed [hrs hours] ps = [posting{paccount=acctname, pamount=amount, ptype=VirtualPosting, ptransaction=Just t}] tests_Hledger_Data_Timeclock = TestList [ "timeclockEntriesToTransactions" ~: do today <- getCurrentDay now' <- getCurrentTime tz <- getCurrentTimeZone let now = utcToLocalTime tz now' nowstr = showtime now yesterday = prevday today clockin = TimeclockEntry nullsourcepos In mktime d = LocalTime d . fromMaybe midnight . #if MIN_VERSION_time(1,5,0) parseTimeM True defaultTimeLocale "%H:%M:%S" #else parseTime defaultTimeLocale "%H:%M:%S" #endif showtime = formatTime defaultTimeLocale "%H:%M" assertEntriesGiveStrings name es ss = assertEqual name ss (map (T.unpack . 
tdescription) $ timeclockEntriesToTransactions now es) assertEntriesGiveStrings "started yesterday, split session at midnight" [clockin (mktime yesterday "23:00:00") "" ""] ["23:00-23:59","00:00-"++nowstr] assertEntriesGiveStrings "split multi-day sessions at each midnight" [clockin (mktime (addDays (-2) today) "23:00:00") "" ""] ["23:00-23:59","00:00-23:59","00:00-"++nowstr] assertEntriesGiveStrings "auto-clock-out if needed" [clockin (mktime today "00:00:00") "" ""] ["00:00-"++nowstr] let future = utcToLocalTime tz $ addUTCTime 100 now' futurestr = showtime future assertEntriesGiveStrings "use the clockin time for auto-clockout if it's in the future" [clockin future "" ""] [printf "%s-%s" futurestr futurestr] ] hledger-lib-1.2/Hledger/Data/Transaction.hs0000644000000000000000000007435613066173044017007 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-| A 'Transaction' represents a movement of some commodity(ies) between two or more accounts. It consists of multiple account 'Posting's which balance to zero, a date, and optional extras like description, cleared status, and tags. -} {-# LANGUAGE OverloadedStrings, LambdaCase #-} module Hledger.Data.Transaction ( -- * Transaction nullsourcepos, nulltransaction, txnTieKnot, txnUntieKnot, -- * operations showAccountName, hasRealPostings, realPostings, assignmentPostings, virtualPostings, balancedVirtualPostings, transactionsPostings, isTransactionBalanced, -- nonzerobalanceerror, -- * date operations transactionDate2, -- * arithmetic transactionPostingBalances, balanceTransaction, balanceTransactionUpdate, -- * rendering showTransaction, showTransactionUnelided, showTransactionUnelidedOneLineAmounts, showPostingLine, showPostingLines, -- * GenericSourcePos sourceFilePath, sourceFirstLine, showGenericSourcePos, -- * misc. tests_Hledger_Data_Transaction ) where import Data.List import Control.Monad.Except import Control.Monad.Identity import Data.Maybe import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Test.HUnit import Text.Printf import qualified Data.Map as Map import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Amount instance Show Transaction where show = showTransactionUnelided instance Show ModifierTransaction where show t = "= " ++ T.unpack (mtvalueexpr t) ++ "\n" ++ unlines (map show (mtpostings t)) instance Show PeriodicTransaction where show t = "~ " ++ T.unpack (ptperiodicexpr t) ++ "\n" ++ unlines (map show (ptpostings t)) sourceFilePath :: GenericSourcePos -> FilePath sourceFilePath = \case GenericSourcePos fp _ _ -> fp JournalSourcePos fp _ -> fp sourceFirstLine :: GenericSourcePos -> Int sourceFirstLine = \case GenericSourcePos _ line _ -> line JournalSourcePos _ (line, _) -> line showGenericSourcePos :: GenericSourcePos -> String showGenericSourcePos = \case GenericSourcePos fp line column -> show fp ++ " (line " ++ show line ++ ", column " ++ show column ++ ")" JournalSourcePos fp (line, line') -> show fp ++ " (lines " ++ show line ++ "-" ++ show line' ++ ")" nullsourcepos :: GenericSourcePos nullsourcepos = GenericSourcePos "" 1 1 nulltransaction :: Transaction nulltransaction = Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=nulldate, tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="", tcomment="", ttags=[], tpostings=[], tpreceding_comment_lines="" } {-| Show a journal transaction, formatted for the print command. 
ledger 2.x's standard format looks like this: @ yyyy/mm/dd[ *][ CODE] description......... [ ; comment...............] account name 1..................... ...$amount1[ ; comment...............] account name 2..................... ..$-amount1[ ; comment...............] pcodewidth = no limit -- 10 -- mimicking ledger layout. pdescwidth = no limit -- 20 -- I don't remember what these mean, pacctwidth = 35 minimum, no maximum -- they were important at the time. pamtwidth = 11 pcommentwidth = no limit -- 22 @ -} showTransaction :: Transaction -> String showTransaction = showTransactionHelper True False showTransactionUnelided :: Transaction -> String showTransactionUnelided = showTransactionHelper False False tests_showTransactionUnelided = [ "showTransactionUnelided" ~: do let t `gives` s = assertEqual "" s (showTransactionUnelided t) nulltransaction `gives` "0000/01/01\n\n" nulltransaction{ tdate=parsedate "2012/05/14", tdate2=Just $ parsedate "2012/05/15", tstatus=Uncleared, tcode="code", tdescription="desc", tcomment="tcomment1\ntcomment2\n", ttags=[("ttag1","val1")], tpostings=[ nullposting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1, hrs 2], pcomment="\npcomment2\n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")] } ] } `gives` unlines [ "2012/05/14=2012/05/15 (code) desc ; tcomment1", " ; tcomment2", " * a $1.00", " ; pcomment2", " * a 2.00h", " ; pcomment2", "" ] ] showTransactionUnelidedOneLineAmounts :: Transaction -> String showTransactionUnelidedOneLineAmounts = showTransactionHelper False True -- cf showPosting showTransactionHelper :: Bool -> Bool -> Transaction -> String showTransactionHelper elide onelineamounts t = unlines $ [descriptionline] ++ newlinecomments ++ (postingsAsLines elide onelineamounts t (tpostings t)) ++ [""] where descriptionline = rstrip $ concat [date, status, code, desc, samelinecomment] date = showDate (tdate t) ++ maybe "" (("="++) . showDate) (tdate2 t) status | tstatus t == Cleared = " *" | tstatus t == Pending = " !" | otherwise = "" code = if T.length (tcode t) > 0 then printf " (%s)" $ T.unpack $ tcode t else "" desc = if null d then "" else " " ++ d where d = T.unpack $ tdescription t (samelinecomment, newlinecomments) = case renderCommentLines (tcomment t) of [] -> ("",[]) c:cs -> (c,cs) -- Render a transaction or posting's comment as indented, semicolon-prefixed comment lines. renderCommentLines :: Text -> [String] renderCommentLines t = case lines $ T.unpack t of ("":ls) -> "":map commentprefix ls ls -> map commentprefix ls where commentprefix = indent . ("; "++) -- -- Render a transaction or posting's comment as semicolon-prefixed comment lines - -- -- an inline (same-line) comment if it's a single line, otherwise multiple indented lines. -- commentLines' :: String -> (String, [String]) -- commentLines' s -- | null s = ("", []) -- | length ls == 1 = (prefix $ head ls, []) -- | otherwise = ("", (prefix $ head ls):(map prefix $ tail ls)) -- where -- ls = lines s -- prefix = indent . 
(";"++) postingsAsLines :: Bool -> Bool -> Transaction -> [Posting] -> [String] postingsAsLines elide onelineamounts t ps | elide && length ps > 1 && isTransactionBalanced Nothing t -- imprecise balanced check = (concatMap (postingAsLines False onelineamounts ps) $ init ps) ++ postingAsLines True onelineamounts ps (last ps) | otherwise = concatMap (postingAsLines False onelineamounts ps) ps postingAsLines :: Bool -> Bool -> [Posting] -> Posting -> [String] postingAsLines elideamount onelineamounts ps p = concat [ postingblock ++ newlinecomments | postingblock <- postingblocks] where postingblocks = [map rstrip $ lines $ concatTopPadded [account, " ", amount, assertion, samelinecomment] | amount <- shownAmounts] assertion = maybe "" ((" = " ++) . showAmountWithZeroCommodity) $ pbalanceassertion p account = indent $ showstatus p ++ fitString (Just acctwidth) Nothing False True (showAccountName Nothing (ptype p) (paccount p)) where showstatus p = if pstatus p == Cleared then "* " else "" acctwidth = maximum $ map (textWidth . paccount) ps -- currently prices are considered part of the amount string when right-aligning amounts shownAmounts | elideamount = [""] | onelineamounts = [fitString (Just amtwidth) Nothing False False $ showMixedAmountOneLine $ pamount p] | otherwise = map (fitStringMulti (Just amtwidth) Nothing False False . showAmount ) . amounts $ pamount p where amtwidth = maximum $ 12 : map (strWidth . showMixedAmount . pamount) ps -- min. 12 for backwards compatibility (samelinecomment, newlinecomments) = case renderCommentLines (pcomment p) of [] -> ("",[]) c:cs -> (c,cs) -- used in balance assertion error showPostingLine p = indent $ if pstatus p == Cleared then "* " else "" ++ showAccountName Nothing (ptype p) (paccount p) ++ " " ++ showMixedAmountOneLine (pamount p) -- | Produce posting line with all comment lines associated with it showPostingLines :: Posting -> [String] showPostingLines p = postingAsLines False False ps p where ps | Just t <- ptransaction p = tpostings t | otherwise = [p] tests_postingAsLines = [ "postingAsLines" ~: do let p `gives` ls = assertEqual (show p) ls (postingAsLines False False [p] p) posting `gives` [] posting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1, hrs 2], pcomment="pcomment1\npcomment2\n tag3: val3 \n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")] } `gives` [ " * a $1.00 ; pcomment1", " ; pcomment2", " ; tag3: val3 ", " * a 2.00h ; pcomment1", " ; pcomment2", " ; tag3: val3 " ] ] tests_inference = [ "inferBalancingAmount" ~: do let p `gives` p' = assertEqual (show p) (Right p') $ inferTransaction p inferTransaction :: Transaction -> Either String Transaction inferTransaction = runIdentity . runExceptT . inferBalancingAmount (\_ _ -> return ()) nulltransaction `gives` nulltransaction nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` missingamt ]} `gives` nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` usd 5 ]} nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` missingamt ]} `gives` nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` usd 1 ]} ] indent :: String -> String indent = (" "++) -- | Show an account name, clipped to the given width if any, and -- appropriately bracketed/parenthesised for the given posting type. showAccountName :: Maybe Int -> PostingType -> AccountName -> String showAccountName w = fmt where fmt RegularPosting = take w' . T.unpack fmt VirtualPosting = parenthesise . reverse . take (w'-2) . 
reverse . T.unpack fmt BalancedVirtualPosting = bracket . reverse . take (w'-2) . reverse . T.unpack w' = fromMaybe 999999 w parenthesise :: String -> String parenthesise s = "("++s++")" bracket :: String -> String bracket s = "["++s++"]" hasRealPostings :: Transaction -> Bool hasRealPostings = not . null . realPostings realPostings :: Transaction -> [Posting] realPostings = filter isReal . tpostings assignmentPostings :: Transaction -> [Posting] assignmentPostings = filter isAssignment . tpostings virtualPostings :: Transaction -> [Posting] virtualPostings = filter isVirtual . tpostings balancedVirtualPostings :: Transaction -> [Posting] balancedVirtualPostings = filter isBalancedVirtual . tpostings transactionsPostings :: [Transaction] -> [Posting] transactionsPostings = concat . map tpostings -- | Get the sums of a transaction's real, virtual, and balanced virtual postings. transactionPostingBalances :: Transaction -> (MixedAmount,MixedAmount,MixedAmount) transactionPostingBalances t = (sumPostings $ realPostings t ,sumPostings $ virtualPostings t ,sumPostings $ balancedVirtualPostings t) -- | Is this transaction balanced ? A balanced transaction's real -- (non-virtual) postings sum to 0, and any balanced virtual postings -- also sum to 0. isTransactionBalanced :: Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> Bool isTransactionBalanced styles t = -- isReallyZeroMixedAmountCost rsum && isReallyZeroMixedAmountCost bvsum isZeroMixedAmount rsum' && isZeroMixedAmount bvsum' where (rsum, _, bvsum) = transactionPostingBalances t rsum' = canonicalise $ costOfMixedAmount rsum bvsum' = canonicalise $ costOfMixedAmount bvsum canonicalise = maybe id canonicaliseMixedAmount styles -- | Ensure this transaction is balanced, possibly inferring a missing -- amount or conversion price(s), or return an error message. -- Balancing is affected by commodity display precisions, so those can -- (optionally) be provided. -- -- this fails for example, if there are several missing amounts -- (possibly with balance assignments) balanceTransaction :: Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> Either String Transaction balanceTransaction stylemap = runIdentity . runExceptT . 
balanceTransactionUpdate (\_ _ -> return ()) stylemap -- | More general version of 'balanceTransaction' that takes an update -- function balanceTransactionUpdate :: MonadError String m => (AccountName -> MixedAmount -> m ()) -- ^ update function -> Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> m Transaction balanceTransactionUpdate update styles t = finalize =<< inferBalancingAmount update t where finalize t' = let t'' = inferBalancingPrices t' in if isTransactionBalanced styles t'' then return $ txnTieKnot t'' else throwError $ printerr $ nonzerobalanceerror t'' printerr s = intercalate "\n" [s, showTransactionUnelided t] nonzerobalanceerror :: Transaction -> String nonzerobalanceerror t = printf "could not balance this transaction (%s%s%s)" rmsg sep bvmsg where (rsum, _, bvsum) = transactionPostingBalances t rmsg | isReallyZeroMixedAmountCost rsum = "" | otherwise = "real postings are off by " ++ showMixedAmount (costOfMixedAmount rsum) bvmsg | isReallyZeroMixedAmountCost bvsum = "" | otherwise = "balanced virtual postings are off by " ++ showMixedAmount (costOfMixedAmount bvsum) sep = if not (null rmsg) && not (null bvmsg) then "; " else "" :: String -- | Infer up to one missing amount for this transactions's real postings, and -- likewise for its balanced virtual postings, if needed; or return an error -- message if we can't. -- -- We can infer a missing amount when there are multiple postings and exactly -- one of them is amountless. If the amounts had price(s) the inferred amount -- have the same price(s), and will be converted to the price commodity. inferBalancingAmount :: MonadError String m => (AccountName -> MixedAmount -> m ()) -- ^ update function -> Transaction -> m Transaction inferBalancingAmount update t@Transaction{tpostings=ps} | length amountlessrealps > 1 = throwError $ printerr "could not balance this transaction - can't have more than one real posting with no amount (remember to put 2 or more spaces before amounts)" | length amountlessbvps > 1 = throwError $ printerr "could not balance this transaction - can't have more than one balanced virtual posting with no amount (remember to put 2 or more spaces before amounts)" | otherwise = do postings <- mapM inferamount ps return t{tpostings=postings} where printerr s = intercalate "\n" [s, showTransactionUnelided t] (amountfulrealps, amountlessrealps) = partition hasAmount (realPostings t) realsum = sumStrict $ map pamount amountfulrealps (amountfulbvps, amountlessbvps) = partition hasAmount (balancedVirtualPostings t) bvsum = sumStrict $ map pamount amountfulbvps inferamount p@Posting{ptype=RegularPosting} | not (hasAmount p) = updateAmount p realsum inferamount p@Posting{ptype=BalancedVirtualPosting} | not (hasAmount p) = updateAmount p bvsum inferamount p = return p updateAmount p amt = update (paccount p) amt' >> return p { pamount=amt', porigin=Just $ originalPosting p } where amt' = normaliseMixedAmount $ costOfMixedAmount (-amt) -- | Infer prices for this transaction's posting amounts, if needed to make -- the postings balance, and if possible. This is done once for the real -- postings and again (separately) for the balanced virtual postings. When -- it's not possible, the transaction is left unchanged. -- -- The simplest example is a transaction with two postings, each in a -- different commodity, with no prices specified. In this case we'll add a -- price to the first posting such that it can be converted to the commodity -- of the second posting (with -B), and such that the postings balance. 
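-- For example (illustrative), given
--
-- @
-- 2017/01/01
--   a   $1.35
--   b   €-1.00
-- @
--
-- the first posting gets a balancing total price, becoming $1.35 @@ €1.00,
-- so the transaction balances when converted to cost.
--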
-- -- In general, we can infer a conversion price when the sum of posting amounts -- contains exactly two different commodities and no explicit prices. Also -- all postings are expected to contain an explicit amount (no missing -- amounts) in a single commodity. Otherwise no price inferring is attempted. -- -- The transaction itself could contain more than two commodities, and/or -- prices, if they cancel out; what matters is that the sum of posting amounts -- contains exactly two commodities and zero prices. -- -- There can also be more than two postings in either of the commodities. -- -- We want to avoid excessive display of digits when the calculated price is -- an irrational number, while hopefully also ensuring the displayed numbers -- make sense if the user does a manual calculation. This is (mostly) achieved -- in two ways: -- -- - when there is only one posting in the "from" commodity, a total price -- (@@) is used, and all available decimal digits are shown -- -- - otherwise, a suitable averaged unit price (@) is applied to the relevant -- postings, with display precision equal to the summed display precisions -- of the two commodities being converted between, or 2, whichever is larger. -- -- (We don't always calculate a good-looking display precision for unit prices -- when the commodity display precisions are low, eg when a journal doesn't -- use any decimal places. The minimum of 2 helps make the prices shown by the -- print command a bit less surprising in this case. Could do better.) -- inferBalancingPrices :: Transaction -> Transaction inferBalancingPrices t@Transaction{tpostings=ps} = t{tpostings=ps'} where ps' = map (priceInferrerFor t BalancedVirtualPosting) $ map (priceInferrerFor t RegularPosting) $ ps -- | Generate a posting update function which assigns a suitable balancing -- price to the posting, if and as appropriate for the given transaction and -- posting type (real or balanced virtual). priceInferrerFor :: Transaction -> PostingType -> (Posting -> Posting) priceInferrerFor t pt = inferprice where postings = filter ((==pt).ptype) $ tpostings t pmixedamounts = map pamount postings pamounts = concatMap amounts pmixedamounts pcommodities = map acommodity pamounts sumamounts = amounts $ sumStrict pmixedamounts -- sum normalises to one amount per commodity & price sumcommodities = map acommodity sumamounts sumprices = filter (/=NoPrice) $ map aprice sumamounts caninferprices = length sumcommodities == 2 && null sumprices inferprice p@Posting{pamount=Mixed [a]} | caninferprices && ptype p == pt && acommodity a == fromcommodity = p{pamount=Mixed [a{aprice=conversionprice}], porigin=Just $ originalPosting p} where fromcommodity = head $ filter (`elem` sumcommodities) pcommodities -- these heads are ugly but should be safe conversionprice | fromcount==1 = TotalPrice $ abs toamount `withPrecision` maxprecision | otherwise = UnitPrice $ abs unitprice `withPrecision` unitprecision where fromcount = length $ filter ((==fromcommodity).acommodity) pamounts fromamount = head $ filter ((==fromcommodity).acommodity) sumamounts tocommodity = head $ filter (/=fromcommodity) sumcommodities toamount = head $ filter ((==tocommodity).acommodity) sumamounts unitprice = toamount `divideAmount` (aquantity fromamount) unitprecision = max 2 ((asprecision $ astyle $ toamount) + (asprecision $ astyle $ fromamount)) inferprice p = p -- Get a transaction's secondary date, defaulting to the primary date. 
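--
-- Illustrative examples:
--
-- >>> transactionDate2 nulltransaction{tdate=fromGregorian 2017 1 1}
-- 2017-01-01
-- >>> transactionDate2 nulltransaction{tdate=fromGregorian 2017 1 1, tdate2=Just (fromGregorian 2017 1 15)}
-- 2017-01-15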
transactionDate2 :: Transaction -> Day transactionDate2 t = fromMaybe (tdate t) $ tdate2 t -- | Ensure a transaction's postings refer back to it, so that eg -- relatedPostings works right. txnTieKnot :: Transaction -> Transaction txnTieKnot t@Transaction{tpostings=ps} = t' where t' = t{tpostings=map (postingSetTransaction t') ps} -- | Ensure a transaction's postings do not refer back to it, so that eg -- recursiveSize and GHCI's :sprint work right. txnUntieKnot :: Transaction -> Transaction txnUntieKnot t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Set a posting's parent transaction. postingSetTransaction :: Transaction -> Posting -> Posting postingSetTransaction t p = p{ptransaction=Just t} tests_Hledger_Data_Transaction = TestList $ concat [ tests_postingAsLines, tests_showTransactionUnelided, tests_inference, [ "showTransaction" ~: do assertEqual "show a balanced transaction, eliding last amount" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking" ,"" ]) (let t = Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18], ptransaction=Just t} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.18)], ptransaction=Just t} ] "" in showTransaction t) ,"showTransaction" ~: do assertEqual "show a balanced transaction, no eliding" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.18" ,"" ]) (let t = Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18], ptransaction=Just t} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.18)], ptransaction=Just t} ] "" in showTransactionUnelided t) -- document some cases that arise in debug/testing: ,"showTransaction" ~: do assertEqual "show an unbalanced transaction, should not elide" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.19" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18]} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.19)]} ] "")) ,"showTransaction" ~: do assertEqual "show an unbalanced transaction with one posting, should not elide" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18]} ] "")) ,"showTransaction" ~: do assertEqual "show a transaction with one posting and a missing amount" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=missingmixedamt} ] "")) ,"showTransaction" ~: do assertEqual "show a transaction with a priced commodityless amount" (unlines ["2010/01/01 x" ," a 1 @ $2" ," b" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2010/01/01") Nothing Uncleared "" "x" "" [] [posting{paccount="a", pamount=Mixed [num 1 `at` (usd 2 `withPrecision` 0)]} ,posting{paccount="b", pamount= missingmixedamt} ] "")) 
,"balanceTransaction" ~: do assertBool "detect unbalanced entry, sign error" (isLeft $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "test" "" [] [posting{paccount="a", pamount=Mixed [usd 1]} ,posting{paccount="b", pamount=Mixed [usd 1]} ] "")) assertBool "detect unbalanced entry, multiple missing amounts" (isLeft $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "test" "" [] [posting{paccount="a", pamount=missingmixedamt} ,posting{paccount="b", pamount=missingmixedamt} ] "")) let e = balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1]} ,posting{paccount="b", pamount=missingmixedamt} ] "") assertBool "balanceTransaction allows one missing amount" (isRight e) assertEqual "balancing amount is inferred" (Mixed [usd (-1)]) (case e of Right e' -> (pamount $ last $ tpostings e') Left _ -> error' "should not happen") let e = balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1.35]} ,posting{paccount="b", pamount=Mixed [eur (-1)]} ] "") assertBool "balanceTransaction can infer conversion price" (isRight e) assertEqual "balancing conversion price is inferred" (Mixed [usd 1.35 @@ (eur 1 `withPrecision` maxprecision)]) (case e of Right e' -> (pamount $ head $ tpostings e') Left _ -> error' "should not happen") assertBool "balanceTransaction balances based on cost if there are unit prices" (isRight $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1 `at` eur 2]} ,posting{paccount="a", pamount=Mixed [usd (-2) `at` eur 1]} ] "")) assertBool "balanceTransaction balances based on cost if there are total prices" (isRight $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1 @@ eur 1]} ,posting{paccount="a", pamount=Mixed [usd (-2) @@ eur 1]} ] "")) ,"isTransactionBalanced" ~: do let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ] "" assertBool "detect balanced" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.01)], ptransaction=Just t} ] "" assertBool "detect unbalanced" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ] "" assertBool "detect unbalanced, one posting" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 0], ptransaction=Just t} ] "" assertBool "one zero posting is considered balanced for now" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], 
ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=VirtualPosting, ptransaction=Just t} ] "" assertBool "virtual postings don't need to balance" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=BalancedVirtualPosting, ptransaction=Just t} ] "" assertBool "balanced virtual postings need to balance among themselves" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=BalancedVirtualPosting, ptransaction=Just t} ,posting{paccount="3", pamount=Mixed [usd (-100)], ptype=BalancedVirtualPosting, ptransaction=Just t} ] "" assertBool "balanced virtual postings need to balance among themselves (2)" (isTransactionBalanced Nothing t) ]] hledger-lib-1.2/Hledger/Data/Types.hs0000644000000000000000000003607113066173044015616 0ustar0000000000000000{-# LANGUAGE CPP, DeriveDataTypeable, StandaloneDeriving, DeriveGeneric, TypeSynonymInstances, FlexibleInstances #-} {-| Most data types are defined here to avoid import cycles. Here is an overview of the hledger data model: > Journal -- a journal is read from one or more data files. It contains.. > [Transaction] -- journal transactions (aka entries), which have date, cleared status, code, description and.. > [Posting] -- multiple account postings, which have account name and amount > [MarketPrice] -- historical market prices for commodities > > Ledger -- a ledger is derived from a journal, by applying a filter specification and doing some further processing. It contains.. > Journal -- a filtered copy of the original journal, containing only the transactions and postings we are interested in > [Account] -- all accounts, in tree order beginning with a "root" account", with their balances and sub/parent accounts For more detailed documentation on each type, see the corresponding modules. -} module Hledger.Data.Types where import GHC.Generics (Generic) import Control.DeepSeq (NFData) import Control.Monad.Except (ExceptT) import Data.Data import Data.Decimal import Data.Default import Text.Blaze (ToMarkup(..)) import qualified Data.Map as M import Data.Text (Text) -- import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import System.Time (ClockTime(..)) import Hledger.Utils.Regex type SmartDate = (String,String,String) data WhichDate = PrimaryDate | SecondaryDate deriving (Eq,Show) data DateSpan = DateSpan (Maybe Day) (Maybe Day) deriving (Eq,Ord,Data,Generic,Typeable) instance NFData DateSpan -- synonyms for various date-related scalars type Year = Integer type Month = Int -- 1-12 type Quarter = Int -- 1-4 type YearWeek = Int -- 1-52 type MonthWeek = Int -- 1-5 type YearDay = Int -- 1-366 type MonthDay = Int -- 1-31 type WeekDay = Int -- 1-7 -- Typical report periods (spans of time), both finite and open-ended. -- A richer abstraction than DateSpan. 
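-- For example (an illustrative sketch, using fromGregorian from
-- Data.Time.Calendar, imported above):
--
-- > MonthPeriod 2017 3                    -- the month of March 2017
-- > PeriodFrom (fromGregorian 2017 1 1)   -- an open-ended period starting 2017/1/1
-- > PeriodAll                             -- no period restriction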
data Period = DayPeriod Day | WeekPeriod Day | MonthPeriod Year Month | QuarterPeriod Year Quarter | YearPeriod Year | PeriodBetween Day Day | PeriodFrom Day | PeriodTo Day | PeriodAll deriving (Eq,Ord,Show,Data,Generic,Typeable) instance Default Period where def = PeriodAll ---- Typical report period/subperiod durations, from a day to a year. --data Duration = -- DayLong -- WeekLong -- MonthLong -- QuarterLong -- YearLong -- deriving (Eq,Ord,Show,Data,Generic,Typeable) -- Ways in which a period can be divided into subperiods. data Interval = NoInterval | Days Int | Weeks Int | Months Int | Quarters Int | Years Int | DayOfMonth Int | DayOfWeek Int -- WeekOfYear Int -- MonthOfYear Int -- QuarterOfYear Int deriving (Eq,Show,Ord,Data,Generic,Typeable) instance Default Interval where def = NoInterval instance NFData Interval type AccountName = Text data AccountAlias = BasicAlias AccountName AccountName | RegexAlias Regexp Replacement deriving (Eq, Read, Show, Ord, Data, Generic, Typeable) instance NFData AccountAlias data Side = L | R deriving (Eq,Show,Read,Ord,Typeable,Data,Generic) instance NFData Side -- | The basic numeric type used in amounts. type Quantity = Decimal deriving instance Data (Quantity) -- The following is for hledger-web, and requires blaze-markup. -- Doing it here avoids needing a matching flag on the hledger-web package. instance ToMarkup (Quantity) where toMarkup = toMarkup . show -- | An amount's price (none, per unit, or total) in another commodity. -- Note the price should be a positive number, although this is not enforced. data Price = NoPrice | UnitPrice Amount | TotalPrice Amount deriving (Eq,Ord,Typeable,Data,Generic) instance NFData Price -- | Display style for an amount. data AmountStyle = AmountStyle { ascommodityside :: Side, -- ^ does the symbol appear on the left or the right ? ascommodityspaced :: Bool, -- ^ space between symbol and quantity ? asprecision :: !Int, -- ^ number of digits displayed after the decimal point asdecimalpoint :: Maybe Char, -- ^ character used as decimal point: period or comma. Nothing means "unspecified, use default" asdigitgroups :: Maybe DigitGroupStyle -- ^ style for displaying digit groups, if any } deriving (Eq,Ord,Read,Show,Typeable,Data,Generic) instance NFData AmountStyle -- | A style for displaying digit groups in the integer part of a -- floating point number. It consists of the character used to -- separate groups (comma or period, whichever is not used as decimal -- point), and the size of each group, starting with the one nearest -- the decimal point. The last group size is assumed to repeat. Eg, -- comma between thousands is DigitGroups ',' [3]. 
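-- A couple of illustrative values (a sketch, following the description above):
--
-- > DigitGroups ',' [3]    -- 1000000 is displayed as "1,000,000"
-- > DigitGroups ',' [3,2]  -- 10000000 is displayed as "1,00,00,000" (the last group size repeats)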
data DigitGroupStyle = DigitGroups Char [Int] deriving (Eq,Ord,Read,Show,Typeable,Data,Generic) instance NFData DigitGroupStyle type CommoditySymbol = Text data Commodity = Commodity { csymbol :: CommoditySymbol, cformat :: Maybe AmountStyle } deriving (Show,Eq,Data,Generic) --,Ord,Typeable,Data,Generic) instance NFData Commodity data Amount = Amount { acommodity :: CommoditySymbol, aquantity :: Quantity, aprice :: Price, -- ^ the (fixed) price for this amount, if any astyle :: AmountStyle } deriving (Eq,Ord,Typeable,Data,Generic) instance NFData Amount newtype MixedAmount = Mixed [Amount] deriving (Eq,Ord,Typeable,Data,Generic) instance NFData MixedAmount data PostingType = RegularPosting | VirtualPosting | BalancedVirtualPosting deriving (Eq,Show,Typeable,Data,Generic) instance NFData PostingType type TagName = Text type TagValue = Text type Tag = (TagName, TagValue) -- ^ A tag name and (possibly empty) value. data ClearedStatus = Uncleared | Pending | Cleared deriving (Eq,Ord,Typeable,Data,Generic) instance NFData ClearedStatus instance Show ClearedStatus where -- custom show.. bad idea.. don't do it.. show Uncleared = "" show Pending = "!" show Cleared = "*" data Posting = Posting { pdate :: Maybe Day, -- ^ this posting's date, if different from the transaction's pdate2 :: Maybe Day, -- ^ this posting's secondary date, if different from the transaction's pstatus :: ClearedStatus, paccount :: AccountName, pamount :: MixedAmount, pcomment :: Text, -- ^ this posting's comment lines, as a single non-indented multi-line string ptype :: PostingType, ptags :: [Tag], -- ^ tag names and values, extracted from the comment pbalanceassertion :: Maybe Amount, -- ^ optional: the expected balance in this commodity in the account after this posting ptransaction :: Maybe Transaction, -- ^ this posting's parent transaction (co-recursive types). -- Tying this knot gets tedious, Maybe makes it easier/optional. porigin :: Maybe Posting -- ^ original posting if this one is result of any transformations (one level only) } deriving (Typeable,Data,Generic) instance NFData Posting -- The equality test for postings ignores the parent transaction's -- identity, to avoid infinite loops. instance Eq Posting where (==) (Posting a1 b1 c1 d1 e1 f1 g1 h1 i1 _ _) (Posting a2 b2 c2 d2 e2 f2 g2 h2 i2 _ _) = a1==a2 && b1==b2 && c1==c2 && d1==d2 && e1==e2 && f1==f2 && g1==g2 && h1==h2 && i1==i2 -- TODO: needs renaming, or removal if no longer needed. See also TextPosition in Hledger.UI.Editor -- | The position of parse errors (eg), like parsec's SourcePos but generic. data GenericSourcePos = GenericSourcePos FilePath Int Int -- ^ name, 1-based line number and 1-based column number. | JournalSourcePos FilePath (Int, Int) -- ^ file name, inclusive range of 1-based line numbers (first, last). 
deriving (Eq, Read, Show, Ord, Data, Generic, Typeable) instance NFData GenericSourcePos data Transaction = Transaction { tindex :: Integer, -- ^ this transaction's 1-based position in the input stream, or 0 when not available tsourcepos :: GenericSourcePos, tdate :: Day, tdate2 :: Maybe Day, tstatus :: ClearedStatus, tcode :: Text, tdescription :: Text, tcomment :: Text, -- ^ this transaction's comment lines, as a single non-indented multi-line string ttags :: [Tag], -- ^ tag names and values, extracted from the comment tpostings :: [Posting], -- ^ this transaction's postings tpreceding_comment_lines :: Text -- ^ any comment lines immediately preceding this transaction } deriving (Eq,Typeable,Data,Generic) instance NFData Transaction data ModifierTransaction = ModifierTransaction { mtvalueexpr :: Text, mtpostings :: [Posting] } deriving (Eq,Typeable,Data,Generic) instance NFData ModifierTransaction data PeriodicTransaction = PeriodicTransaction { ptperiodicexpr :: Text, ptpostings :: [Posting] } deriving (Eq,Typeable,Data,Generic) instance NFData PeriodicTransaction data TimeclockCode = SetBalance | SetRequiredHours | In | Out | FinalOut deriving (Eq,Ord,Typeable,Data,Generic) instance NFData TimeclockCode data TimeclockEntry = TimeclockEntry { tlsourcepos :: GenericSourcePos, tlcode :: TimeclockCode, tldatetime :: LocalTime, tlaccount :: AccountName, tldescription :: Text } deriving (Eq,Ord,Typeable,Data,Generic) instance NFData TimeclockEntry data MarketPrice = MarketPrice { mpdate :: Day, mpcommodity :: CommoditySymbol, mpamount :: Amount } deriving (Eq,Ord,Typeable,Data,Generic) -- & Show (in Amount.hs) instance NFData MarketPrice -- | A Journal, containing transactions and various other things. -- The basic data model for hledger. -- -- This is used during parsing (as the type alias ParsedJournal), and -- then finalised/validated for use as a Journal. Some extra -- parsing-related fields are included for convenience, at least for -- now. In a ParsedJournal these are updated as parsing proceeds, in a -- Journal they represent the final state at end of parsing (used eg -- by the add command). -- data Journal = Journal { -- parsing-related data jparsedefaultyear :: Maybe Year -- ^ the current default year, specified by the most recent Y directive (or current date) ,jparsedefaultcommodity :: Maybe (CommoditySymbol,AmountStyle) -- ^ the current default commodity and its format, specified by the most recent D directive ,jparseparentaccounts :: [AccountName] -- ^ the current stack of parent account names, specified by apply account directives ,jparsealiases :: [AccountAlias] -- ^ the current account name aliases in effect, specified by alias directives (& options ?) 
-- ,jparsetransactioncount :: Integer -- ^ the current count of transactions parsed so far (only journal format txns, currently) ,jparsetimeclockentries :: [TimeclockEntry] -- ^ timeclock sessions which have not been clocked out -- principal data ,jaccounts :: [AccountName] -- ^ accounts that have been declared by account directives ,jcommodities :: M.Map CommoditySymbol Commodity -- ^ commodities and formats declared by commodity directives ,jinferredcommodities :: M.Map CommoditySymbol AmountStyle -- ^ commodities and formats inferred from journal amounts ,jmarketprices :: [MarketPrice] ,jmodifiertxns :: [ModifierTransaction] ,jperiodictxns :: [PeriodicTransaction] ,jtxns :: [Transaction] ,jfinalcommentlines :: Text -- ^ any final trailing comments in the (main) journal file ,jfiles :: [(FilePath, Text)] -- ^ the file path and raw text of the main and -- any included journal files. The main file is first, -- followed by any included files in the order encountered. ,jlastreadtime :: ClockTime -- ^ when this journal was last read from its file(s) } deriving (Eq, Typeable, Data, Generic) deriving instance Data (ClockTime) deriving instance Typeable (ClockTime) deriving instance Generic (ClockTime) instance NFData ClockTime instance NFData Journal -- | A journal in the process of being parsed, not yet finalised. -- The data is partial, and list fields are in reverse order. type ParsedJournal = Journal -- | The id of a data format understood by hledger, eg @journal@ or @csv@. -- The --output-format option selects one of these for output. type StorageFormat = String -- | A hledger journal reader is a triple of storage format name, a -- detector of that format, and a parser from that format to Journal. data Reader = Reader { -- The canonical name of the format handled by this reader rFormat :: StorageFormat -- The file extensions recognised as containing this format ,rExtensions :: [String] -- A text parser for this format, accepting an optional rules file, -- assertion-checking flag, and file path for error messages, -- producing an exception-raising IO action that returns a journal -- or error message. ,rParser :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal -- Experimental readers are never tried automatically. ,rExperimental :: Bool } instance Show Reader where show r = rFormat r ++ " reader" -- | An account, with name, balances and links to parent/subaccounts -- which let you walk up or down the account tree. data Account = Account { aname :: AccountName, -- ^ this account's full name aebalance :: MixedAmount, -- ^ this account's balance, excluding subaccounts asubs :: [Account], -- ^ sub-accounts anumpostings :: Int, -- ^ number of postings to this account -- derived from the above : aibalance :: MixedAmount, -- ^ this account's balance, including subaccounts aparent :: Maybe Account, -- ^ parent account aboring :: Bool -- ^ used in the accounts report to label elidable parents } deriving (Typeable, Data, Generic) -- | A Ledger has the journal it derives from, and the accounts -- derived from that. Accounts are accessible both list-wise and -- tree-wise, since each one knows its parent and subs; the first -- account is the root of the tree and always exists. data Ledger = Ledger { ljournal :: Journal, laccounts :: [Account] } hledger-lib-1.2/Hledger/Query.hs0000644000000000000000000010553513042200120014724 0ustar0000000000000000{-| A general query system for matching things (accounts, postings, transactions..) 
by various criteria, and a parser for query expressions. -} {-# LANGUAGE DeriveDataTypeable, OverloadedStrings, ViewPatterns #-} module Hledger.Query ( -- * Query and QueryOpt Query(..), QueryOpt(..), -- * parsing parseQuery, simplifyQuery, filterQuery, -- * accessors queryIsNull, queryIsAcct, queryIsDepth, queryIsDate, queryIsDate2, queryIsDateOrDate2, queryIsStartDateOnly, queryIsSym, queryIsReal, queryIsStatus, queryIsEmpty, queryStartDate, queryEndDate, queryDateSpan, queryDateSpan', queryDepth, inAccount, inAccountQuery, -- * matching matchesTransaction, matchesPosting, matchesAccount, matchesMixedAmount, matchesAmount, words'', -- * tests tests_Hledger_Query ) where import Data.Data import Data.Either import Data.List import Data.Maybe import Data.Monoid ((<>)) -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe (readDef, headDef) import Test.HUnit import Text.Megaparsec import Text.Megaparsec.Text import Hledger.Utils hiding (words') import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount (amount, nullamt, usd) import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Transaction -- | A query is a composition of search criteria, which can be used to -- match postings, transactions, accounts and more. data Query = Any -- ^ always match | None -- ^ never match | Not Query -- ^ negate this match | Or [Query] -- ^ match if any of these match | And [Query] -- ^ match if all of these match | Code Regexp -- ^ match if code matches this regexp | Desc Regexp -- ^ match if description matches this regexp | Acct Regexp -- ^ match postings whose account matches this regexp | Date DateSpan -- ^ match if primary date in this date span | Date2 DateSpan -- ^ match if secondary date in this date span | Status ClearedStatus -- ^ match txns/postings with this cleared status (Status Uncleared matches all states except cleared) | Real Bool -- ^ match if "realness" (involves a real non-virtual account ?) has this value | Amt OrdPlus Quantity -- ^ match if the amount's numeric quantity is less than/greater than/equal to/unsignedly equal to some value | Sym Regexp -- ^ match if the entire commodity symbol is matched by this regexp | Empty Bool -- ^ if true, show zero-amount postings/accounts which are usually not shown -- more of a query option than a query criteria ? | Depth Int -- ^ match if account depth is less than or equal to this value. 
-- Depth is sometimes used like a query (for filtering report data) -- and sometimes like a query option (for controlling display) | Tag Regexp (Maybe Regexp) -- ^ match if a tag's name, and optionally its value, is matched by these respective regexps -- matching the regexp if provided, exists deriving (Eq,Data,Typeable) -- custom Show implementation to show strings more accurately, eg for debugging regexps instance Show Query where show Any = "Any" show None = "None" show (Not q) = "Not (" ++ show q ++ ")" show (Or qs) = "Or (" ++ show qs ++ ")" show (And qs) = "And (" ++ show qs ++ ")" show (Code r) = "Code " ++ show r show (Desc r) = "Desc " ++ show r show (Acct r) = "Acct " ++ show r show (Date ds) = "Date (" ++ show ds ++ ")" show (Date2 ds) = "Date2 (" ++ show ds ++ ")" show (Status b) = "Status " ++ show b show (Real b) = "Real " ++ show b show (Amt ord qty) = "Amt " ++ show ord ++ " " ++ show qty show (Sym r) = "Sym " ++ show r show (Empty b) = "Empty " ++ show b show (Depth n) = "Depth " ++ show n show (Tag s ms) = "Tag " ++ show s ++ " (" ++ show ms ++ ")" -- | A query option changes a query's/report's behaviour and output in some way. data QueryOpt = QueryOptInAcctOnly AccountName -- ^ show an account register focussed on this account | QueryOptInAcct AccountName -- ^ as above but include sub-accounts in the account register -- | QueryOptCostBasis -- ^ show amounts converted to cost where possible -- | QueryOptDate2 -- ^ show secondary dates instead of primary dates deriving (Show, Eq, Data, Typeable) -- parsing -- -- | A query restricting the account(s) to be shown in the sidebar, if any. -- -- Just looks at the first query option. -- showAccountMatcher :: [QueryOpt] -> Maybe Query -- showAccountMatcher (QueryOptInAcctSubsOnly a:_) = Just $ Acct True $ accountNameToAccountRegex a -- showAccountMatcher _ = Nothing -- | Convert a query expression containing zero or more space-separated -- terms to a query and zero or more query options. A query term is either: -- -- 1. a search pattern, which matches on one or more fields, eg: -- -- acct:REGEXP - match the account name with a regular expression -- desc:REGEXP - match the transaction description -- date:PERIODEXP - match the date with a period expression -- -- The prefix indicates the field to match, or if there is no prefix -- account name is assumed. -- -- 2. a query option, which modifies the reporting behaviour in some -- way. There is currently one of these, which may appear only once: -- -- inacct:FULLACCTNAME -- -- The usual shell quoting rules are assumed. When a pattern contains -- whitespace, it (or the whole term including prefix) should be enclosed -- in single or double quotes. -- -- Period expressions may contain relative dates, so a reference date is -- required to fully parse these. -- -- Multiple terms are combined as follows: -- 1. multiple account patterns are OR'd together -- 2. multiple description patterns are OR'd together -- 3. 
then all terms are AND'd together parseQuery :: Day -> T.Text -> (Query,[QueryOpt]) parseQuery d s = (q, opts) where terms = words'' prefixes s (pats, opts) = partitionEithers $ map (parseQueryTerm d) terms (descpats, pats') = partition queryIsDesc pats (acctpats, otherpats) = partition queryIsAcct pats' q = simplifyQuery $ And $ [Or acctpats, Or descpats] ++ otherpats tests_parseQuery = [ "parseQuery" ~: do let d = nulldate -- parsedate "2011/1/1" parseQuery d "acct:'expenses:autres d\233penses' desc:b" `is` (And [Acct "expenses:autres d\233penses", Desc "b"], []) parseQuery d "inacct:a desc:\"b b\"" `is` (Desc "b b", [QueryOptInAcct "a"]) parseQuery d "inacct:a inacct:b" `is` (Any, [QueryOptInAcct "a", QueryOptInAcct "b"]) parseQuery d "desc:'x x'" `is` (Desc "x x", []) parseQuery d "'a a' 'b" `is` (Or [Acct "a a",Acct "'b"], []) parseQuery d "\"" `is` (Acct "\"", []) ] -- XXX -- | Quote-and-prefix-aware version of words - don't split on spaces which -- are inside quotes, including quotes which may have one of the specified -- prefixes in front, and maybe an additional not: prefix in front of that. words'' :: [T.Text] -> T.Text -> [T.Text] words'' prefixes = fromparse . parsewith maybeprefixedquotedphrases -- XXX where maybeprefixedquotedphrases :: Parser [T.Text] maybeprefixedquotedphrases = choice' [prefixedQuotedPattern, singleQuotedPattern, doubleQuotedPattern, pattern] `sepBy` some spacenonewline prefixedQuotedPattern :: Parser T.Text prefixedQuotedPattern = do not' <- fromMaybe "" `fmap` (optional $ string "not:") let allowednexts | null not' = prefixes | otherwise = prefixes ++ [""] next <- fmap T.pack $ choice' $ map (string . T.unpack) allowednexts let prefix :: T.Text prefix = T.pack not' <> next p <- singleQuotedPattern <|> doubleQuotedPattern return $ prefix <> stripquotes p singleQuotedPattern :: Parser T.Text singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf ("'" :: [Char])) >>= return . stripquotes . T.pack doubleQuotedPattern :: Parser T.Text doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf ("\"" :: [Char])) >>= return . stripquotes . T.pack pattern :: Parser T.Text pattern = fmap T.pack $ many (noneOf (" \n\r" :: [Char])) tests_words'' = [ "words''" ~: do assertEqual "1" ["a","b"] (words'' [] "a b") assertEqual "2" ["a b"] (words'' [] "'a b'") assertEqual "3" ["not:a","b"] (words'' [] "not:a b") assertEqual "4" ["not:a b"] (words'' [] "not:'a b'") assertEqual "5" ["not:a b"] (words'' [] "'not:a b'") assertEqual "6" ["not:desc:a b"] (words'' ["desc:"] "not:desc:'a b'") let s `gives` r = assertEqual "" r (words'' prefixes s) "\"acct:expenses:autres d\233penses\"" `gives` ["acct:expenses:autres d\233penses"] "\"" `gives` ["\""] ] -- XXX -- keep synced with patterns below, excluding "not" prefixes :: [T.Text] prefixes = map (<>":") [ "inacctonly" ,"inacct" ,"amt" ,"code" ,"desc" ,"acct" ,"date" ,"date2" ,"status" ,"cur" ,"real" ,"empty" ,"depth" ,"tag" ] defaultprefix :: T.Text defaultprefix = "acct" -- -- | Parse the query string as a boolean tree of match patterns. -- parseQueryTerm :: String -> Query -- parseQueryTerm s = either (const (Any)) id $ runParser query () "" $ lexmatcher s -- lexmatcher :: String -> [String] -- lexmatcher s = words' s -- query :: GenParser String () Query -- query = undefined -- | Parse a single query term as either a query or a query option, -- or raise an error if it has invalid syntax. 
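-- Some illustrative results (a sketch based on the tests below; d is a
-- reference day, needed only for relative date expressions):
--
-- > parseQueryTerm d "a"          -- Left (Acct "a"), acct: being the default prefix
-- > parseQueryTerm d "status:1"   -- Left (Status Cleared)
-- > parseQueryTerm d "inacct:a"   -- Right (QueryOptInAcct "a")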
parseQueryTerm :: Day -> T.Text -> Either Query QueryOpt parseQueryTerm _ (T.stripPrefix "inacctonly:" -> Just s) = Right $ QueryOptInAcctOnly s parseQueryTerm _ (T.stripPrefix "inacct:" -> Just s) = Right $ QueryOptInAcct s parseQueryTerm d (T.stripPrefix "not:" -> Just s) = case parseQueryTerm d s of Left m -> Left $ Not m Right _ -> Left Any -- not:somequeryoption will be ignored parseQueryTerm _ (T.stripPrefix "code:" -> Just s) = Left $ Code $ T.unpack s parseQueryTerm _ (T.stripPrefix "desc:" -> Just s) = Left $ Desc $ T.unpack s parseQueryTerm _ (T.stripPrefix "acct:" -> Just s) = Left $ Acct $ T.unpack s parseQueryTerm d (T.stripPrefix "date2:" -> Just s) = case parsePeriodExpr d s of Left e -> error' $ "\"date2:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,span) -> Left $ Date2 span parseQueryTerm d (T.stripPrefix "date:" -> Just s) = case parsePeriodExpr d s of Left e -> error' $ "\"date:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,span) -> Left $ Date span parseQueryTerm _ (T.stripPrefix "status:" -> Just s) = case parseStatus s of Left e -> error' $ "\"status:"++T.unpack s++"\" gave a parse error: " ++ e Right st -> Left $ Status st parseQueryTerm _ (T.stripPrefix "real:" -> Just s) = Left $ Real $ parseBool s || T.null s parseQueryTerm _ (T.stripPrefix "amt:" -> Just s) = Left $ Amt ord q where (ord, q) = parseAmountQueryTerm s parseQueryTerm _ (T.stripPrefix "empty:" -> Just s) = Left $ Empty $ parseBool s parseQueryTerm _ (T.stripPrefix "depth:" -> Just s) | n >= 0 = Left $ Depth n | otherwise = error' "depth: should have a positive number" where n = readDef 0 (T.unpack s) parseQueryTerm _ (T.stripPrefix "cur:" -> Just s) = Left $ Sym (T.unpack s) -- support cur: as an alias parseQueryTerm _ (T.stripPrefix "tag:" -> Just s) = Left $ Tag n v where (n,v) = parseTag s parseQueryTerm _ "" = Left $ Any parseQueryTerm d s = parseQueryTerm d $ defaultprefix<>":"<>s tests_parseQueryTerm = [ "parseQueryTerm" ~: do let s `gives` r = parseQueryTerm nulldate s `is` r "a" `gives` (Left $ Acct "a") "acct:expenses:autres d\233penses" `gives` (Left $ Acct "expenses:autres d\233penses") "not:desc:a b" `gives` (Left $ Not $ Desc "a b") "status:1" `gives` (Left $ Status Cleared) "status:*" `gives` (Left $ Status Cleared) "status:!" 
`gives` (Left $ Status Pending) "status:0" `gives` (Left $ Status Uncleared) "status:" `gives` (Left $ Status Uncleared) "real:1" `gives` (Left $ Real True) "date:2008" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2008/01/01") (Just $ parsedate "2009/01/01")) "date:from 2012/5/17" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2012/05/17") Nothing) "inacct:a" `gives` (Right $ QueryOptInAcct "a") "tag:a" `gives` (Left $ Tag "a" Nothing) "tag:a=some value" `gives` (Left $ Tag "a" (Just "some value")) -- "amt:<0" `gives` (Left $ Amt LT 0) -- "amt:=.23" `gives` (Left $ Amt EQ 0.23) -- "amt:>10000.10" `gives` (Left $ Amt GT 10000.1) ] data OrdPlus = Lt | LtEq | Gt | GtEq | Eq | AbsLt | AbsLtEq | AbsGt | AbsGtEq | AbsEq deriving (Show,Eq,Data,Typeable) -- can fail parseAmountQueryTerm :: T.Text -> (OrdPlus, Quantity) parseAmountQueryTerm s' = case s' of -- feel free to do this a smarter way "" -> err (T.stripPrefix "<+" -> Just s) -> (Lt, readDef err (T.unpack s)) (T.stripPrefix "<=+" -> Just s) -> (LtEq, readDef err (T.unpack s)) (T.stripPrefix ">+" -> Just s) -> (Gt, readDef err (T.unpack s)) (T.stripPrefix ">=+" -> Just s) -> (GtEq, readDef err (T.unpack s)) (T.stripPrefix "=+" -> Just s) -> (Eq, readDef err (T.unpack s)) (T.stripPrefix "+" -> Just s) -> (Eq, readDef err (T.unpack s)) (T.stripPrefix "<-" -> Just s) -> (Lt, negate $ readDef err (T.unpack s)) (T.stripPrefix "<=-" -> Just s) -> (LtEq, negate $ readDef err (T.unpack s)) (T.stripPrefix ">-" -> Just s) -> (Gt, negate $ readDef err (T.unpack s)) (T.stripPrefix ">=-" -> Just s) -> (GtEq, negate $ readDef err (T.unpack s)) (T.stripPrefix "=-" -> Just s) -> (Eq, negate $ readDef err (T.unpack s)) (T.stripPrefix "-" -> Just s) -> (Eq, negate $ readDef err (T.unpack s)) (T.stripPrefix "<=" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (LtEq, 0) _ -> (AbsLtEq, n) (T.stripPrefix "<" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (Lt, 0) _ -> (AbsLt, n) (T.stripPrefix ">=" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (GtEq, 0) _ -> (AbsGtEq, n) (T.stripPrefix ">" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (Gt, 0) _ -> (AbsGt, n) (T.stripPrefix "=" -> Just s) -> (AbsEq, readDef err (T.unpack s)) s -> (AbsEq, readDef err (T.unpack s)) where err = error' $ "could not parse as '=', '<', or '>' (optional) followed by a (optionally signed) numeric quantity: " ++ T.unpack s' tests_parseAmountQueryTerm = [ "parseAmountQueryTerm" ~: do let s `gives` r = parseAmountQueryTerm s `is` r "<0" `gives` (Lt,0) -- special case for convenience, since AbsLt 0 would be always false ">0" `gives` (Gt,0) -- special case for convenience and consistency with above ">10000.10" `gives` (AbsGt,10000.1) "=0.23" `gives` (AbsEq,0.23) "0.23" `gives` (AbsEq,0.23) "<=+0.23" `gives` (LtEq,0.23) "-0.23" `gives` (Eq,(-0.23)) ] parseTag :: T.Text -> (Regexp, Maybe Regexp) parseTag s | "=" `T.isInfixOf` s = (T.unpack n, Just $ tail $ T.unpack v) | otherwise = (T.unpack s, Nothing) where (n,v) = T.break (=='=') s -- | Parse the value part of a "status:" query, or return an error. parseStatus :: T.Text -> Either String ClearedStatus parseStatus s | s `elem` ["*","1"] = Right Cleared | s `elem` ["!"] = Right Pending | s `elem` ["","0"] = Right Uncleared | otherwise = Left $ "could not parse "++show s++" as a status (should be *, ! or empty)" -- | Parse the boolean value part of a "status:" query. "1" means true, -- anything else will be parsed as false without error. 
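-- (Within this module, parseBool is used for the "real:" and "empty:" query
-- values; "status:" values are handled by parseStatus above.) For example:
--
-- > parseBool "1"   -- True
-- > parseBool "0"   -- False; anything other than "1" parses as False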
parseBool :: T.Text -> Bool parseBool s = s `elem` truestrings truestrings :: [T.Text] truestrings = ["1"] simplifyQuery :: Query -> Query simplifyQuery q = let q' = simplify q in if q' == q then q else simplifyQuery q' where simplify (And []) = Any simplify (And [q]) = simplify q simplify (And qs) | same qs = simplify $ head qs | any (==None) qs = None | all queryIsDate qs = Date $ spansIntersect $ mapMaybe queryTermDateSpan qs | otherwise = And $ concat $ [map simplify dateqs, map simplify otherqs] where (dateqs, otherqs) = partition queryIsDate $ filter (/=Any) qs simplify (Or []) = Any simplify (Or [q]) = simplifyQuery q simplify (Or qs) | same qs = simplify $ head qs | any (==Any) qs = Any -- all queryIsDate qs = Date $ spansUnion $ mapMaybe queryTermDateSpan qs ? | otherwise = Or $ map simplify $ filter (/=None) qs simplify (Date (DateSpan Nothing Nothing)) = Any simplify (Date2 (DateSpan Nothing Nothing)) = Any simplify q = q tests_simplifyQuery = [ "simplifyQuery" ~: do let q `gives` r = assertEqual "" r (simplifyQuery q) Or [Acct "a"] `gives` Acct "a" Or [Any,None] `gives` Any And [Any,None] `gives` None And [Any,Any] `gives` Any And [Acct "b",Any] `gives` Acct "b" And [Any,And [Date (DateSpan Nothing Nothing)]] `gives` Any And [Date (DateSpan Nothing (Just $ parsedate "2013-01-01")), Date (DateSpan (Just $ parsedate "2012-01-01") Nothing)] `gives` Date (DateSpan (Just $ parsedate "2012-01-01") (Just $ parsedate "2013-01-01")) And [Or [],Or [Desc "b b"]] `gives` Desc "b b" ] same [] = True same (a:as) = all (a==) as -- | Remove query terms (or whole sub-expressions) not matching the given -- predicate from this query. XXX Semantics not completely clear. filterQuery :: (Query -> Bool) -> Query -> Query filterQuery p = simplifyQuery . filterQuery' p filterQuery' :: (Query -> Bool) -> Query -> Query filterQuery' p (And qs) = And $ map (filterQuery p) qs filterQuery' p (Or qs) = Or $ map (filterQuery p) qs -- filterQuery' p (Not q) = Not $ filterQuery p q filterQuery' p q = if p q then q else Any tests_filterQuery = [ "filterQuery" ~: do let (q,p) `gives` r = assertEqual "" r (filterQuery p q) (Any, queryIsDepth) `gives` Any (Depth 1, queryIsDepth) `gives` Depth 1 (And [And [Status Cleared,Depth 1]], not . queryIsDepth) `gives` Status Cleared -- (And [Date nulldatespan, Not (Or [Any, Depth 1])], queryIsDepth) `gives` And [Not (Or [Depth 1])] ] -- * accessors -- | Does this query match everything ? queryIsNull :: Query -> Bool queryIsNull Any = True queryIsNull (And []) = True queryIsNull (Not (Or [])) = True queryIsNull _ = False queryIsDepth :: Query -> Bool queryIsDepth (Depth _) = True queryIsDepth _ = False queryIsDate :: Query -> Bool queryIsDate (Date _) = True queryIsDate _ = False queryIsDate2 :: Query -> Bool queryIsDate2 (Date2 _) = True queryIsDate2 _ = False queryIsDateOrDate2 :: Query -> Bool queryIsDateOrDate2 (Date _) = True queryIsDateOrDate2 (Date2 _) = True queryIsDateOrDate2 _ = False queryIsDesc :: Query -> Bool queryIsDesc (Desc _) = True queryIsDesc _ = False queryIsAcct :: Query -> Bool queryIsAcct (Acct _) = True queryIsAcct _ = False queryIsSym :: Query -> Bool queryIsSym (Sym _) = True queryIsSym _ = False queryIsReal :: Query -> Bool queryIsReal (Real _) = True queryIsReal _ = False queryIsStatus :: Query -> Bool queryIsStatus (Status _) = True queryIsStatus _ = False queryIsEmpty :: Query -> Bool queryIsEmpty (Empty _) = True queryIsEmpty _ = False -- | Does this query specify a start date and nothing else (that would -- filter postings prior to the date) ? 
-- When the flag is true, look for a starting secondary date instead. queryIsStartDateOnly :: Bool -> Query -> Bool queryIsStartDateOnly _ Any = False queryIsStartDateOnly _ None = False queryIsStartDateOnly secondary (Or ms) = and $ map (queryIsStartDateOnly secondary) ms queryIsStartDateOnly secondary (And ms) = and $ map (queryIsStartDateOnly secondary) ms queryIsStartDateOnly False (Date (DateSpan (Just _) _)) = True queryIsStartDateOnly True (Date2 (DateSpan (Just _) _)) = True queryIsStartDateOnly _ _ = False -- | What start date (or secondary date) does this query specify, if any ? -- For OR expressions, use the earliest of the dates. NOT is ignored. queryStartDate :: Bool -> Query -> Maybe Day queryStartDate secondary (Or ms) = earliestMaybeDate $ map (queryStartDate secondary) ms queryStartDate secondary (And ms) = latestMaybeDate $ map (queryStartDate secondary) ms queryStartDate False (Date (DateSpan (Just d) _)) = Just d queryStartDate True (Date2 (DateSpan (Just d) _)) = Just d queryStartDate _ _ = Nothing -- | What end date (or secondary date) does this query specify, if any ? -- For OR expressions, use the latest of the dates. NOT is ignored. queryEndDate :: Bool -> Query -> Maybe Day queryEndDate secondary (Or ms) = latestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate secondary (And ms) = earliestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate False (Date (DateSpan _ (Just d))) = Just d queryEndDate True (Date2 (DateSpan _ (Just d))) = Just d queryEndDate _ _ = Nothing queryTermDateSpan (Date span) = Just span queryTermDateSpan _ = Nothing -- | What date span (or secondary date span) does this query specify ? -- For OR expressions, use the widest possible span. NOT is ignored. queryDateSpan :: Bool -> Query -> DateSpan queryDateSpan secondary q = spansUnion $ queryDateSpans secondary q -- | Extract all date (or secondary date) spans specified in this query. -- NOT is ignored. queryDateSpans :: Bool -> Query -> [DateSpan] queryDateSpans secondary (Or qs) = concatMap (queryDateSpans secondary) qs queryDateSpans secondary (And qs) = concatMap (queryDateSpans secondary) qs queryDateSpans False (Date span) = [span] queryDateSpans True (Date2 span) = [span] queryDateSpans _ _ = [] -- | What date span (or secondary date span) does this query specify ? -- For OR expressions, use the widest possible span. NOT is ignored. queryDateSpan' :: Query -> DateSpan queryDateSpan' q = spansUnion $ queryDateSpans' q -- | Extract all date (or secondary date) spans specified in this query. -- NOT is ignored. queryDateSpans' :: Query -> [DateSpan] queryDateSpans' (Or qs) = concatMap queryDateSpans' qs queryDateSpans' (And qs) = concatMap queryDateSpans' qs queryDateSpans' (Date span) = [span] queryDateSpans' (Date2 span) = [span] queryDateSpans' _ = [] -- | What is the earliest of these dates, where Nothing is latest ? earliestMaybeDate :: [Maybe Day] -> Maybe Day earliestMaybeDate mds = head $ sortBy compareMaybeDates mds ++ [Nothing] -- | What is the latest of these dates, where Nothing is earliest ? latestMaybeDate :: [Maybe Day] -> Maybe Day latestMaybeDate = headDef Nothing . sortBy (flip compareMaybeDates) -- | What is the earliest of these dates, ignoring Nothings ? earliestMaybeDate' :: [Maybe Day] -> Maybe Day earliestMaybeDate' = headDef Nothing . sortBy compareMaybeDates . filter isJust -- | What is the latest of these dates, ignoring Nothings ? latestMaybeDate' :: [Maybe Day] -> Maybe Day latestMaybeDate' = headDef Nothing . sortBy (flip compareMaybeDates) . 
filter isJust -- | Compare two maybe dates, Nothing is earliest. compareMaybeDates :: Maybe Day -> Maybe Day -> Ordering compareMaybeDates Nothing Nothing = EQ compareMaybeDates Nothing (Just _) = LT compareMaybeDates (Just _) Nothing = GT compareMaybeDates (Just a) (Just b) = compare a b -- | The depth limit this query specifies, or a large number if none. queryDepth :: Query -> Int queryDepth q = case queryDepth' q of [] -> 99999 ds -> minimum ds where queryDepth' (Depth d) = [d] queryDepth' (Or qs) = concatMap queryDepth' qs queryDepth' (And qs) = concatMap queryDepth' qs queryDepth' _ = [] -- | The account we are currently focussed on, if any, and whether subaccounts are included. -- Just looks at the first query option. inAccount :: [QueryOpt] -> Maybe (AccountName,Bool) inAccount [] = Nothing inAccount (QueryOptInAcctOnly a:_) = Just (a,False) inAccount (QueryOptInAcct a:_) = Just (a,True) -- | A query for the account(s) we are currently focussed on, if any. -- Just looks at the first query option. inAccountQuery :: [QueryOpt] -> Maybe Query inAccountQuery [] = Nothing inAccountQuery (QueryOptInAcctOnly a : _) = Just $ Acct $ accountNameToAccountOnlyRegex a inAccountQuery (QueryOptInAcct a : _) = Just $ Acct $ accountNameToAccountRegex a -- -- | Convert a query to its inverse. -- negateQuery :: Query -> Query -- negateQuery = Not -- matching -- | Does the match expression match this account ? -- A matching in: clause is also considered a match. matchesAccount :: Query -> AccountName -> Bool matchesAccount (None) _ = False matchesAccount (Not m) a = not $ matchesAccount m a matchesAccount (Or ms) a = any (`matchesAccount` a) ms matchesAccount (And ms) a = all (`matchesAccount` a) ms matchesAccount (Acct r) a = regexMatchesCI r (T.unpack a) -- XXX pack matchesAccount (Depth d) a = accountNameLevel a <= d matchesAccount (Tag _ _) _ = False matchesAccount _ _ = True tests_matchesAccount = [ "matchesAccount" ~: do assertBool "positive acct match" $ matchesAccount (Acct "b:c") "a:bb:c:d" -- assertBool "acct should match at beginning" $ not $ matchesAccount (Acct True "a:b") "c:a:b" let q `matches` a = assertBool "" $ q `matchesAccount` a Depth 2 `matches` "a:b" assertBool "" $ Depth 2 `matchesAccount` "a" assertBool "" $ Depth 2 `matchesAccount` "a:b" assertBool "" $ not $ Depth 2 `matchesAccount` "a:b:c" assertBool "" $ Date nulldatespan `matchesAccount` "a" assertBool "" $ Date2 nulldatespan `matchesAccount` "a" assertBool "" $ not $ (Tag "a" Nothing) `matchesAccount` "a" ] matchesMixedAmount :: Query -> MixedAmount -> Bool matchesMixedAmount q (Mixed []) = q `matchesAmount` nullamt matchesMixedAmount q (Mixed as) = any (q `matchesAmount`) as -- | Does the match expression match this (simple) amount ? matchesAmount :: Query -> Amount -> Bool matchesAmount (Not q) a = not $ q `matchesAmount` a matchesAmount (Any) _ = True matchesAmount (None) _ = False matchesAmount (Or qs) a = any (`matchesAmount` a) qs matchesAmount (And qs) a = all (`matchesAmount` a) qs -- matchesAmount (Amt ord n) a = compareAmount ord n a matchesAmount (Sym r) a = regexMatchesCI ("^" ++ r ++ "$") $ T.unpack $ acommodity a -- matchesAmount _ _ = True -- | Is this simple (single-amount) mixed amount's quantity less than, greater than, equal to, or unsignedly equal to this number ? -- For multi-amount (multiple commodities, or just unsimplified) mixed amounts this is always true. -- | Is this amount's quantity less than, greater than, equal to, or unsignedly equal to this number ? 
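-- For example (a sketch; usd n constructs a dollar amount with quantity n):
--
-- > compareAmount Gt    10 (usd 20)     -- True:  20 > 10
-- > compareAmount Gt    10 (usd (-20))  -- False: -20 is not greater than 10
-- > compareAmount AbsGt 10 (usd (-20))  -- True:  abs (-20) > abs 10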
compareAmount :: OrdPlus -> Quantity -> Amount -> Bool compareAmount ord q Amount{aquantity=aq} = case ord of Lt -> aq < q LtEq -> aq <= q Gt -> aq > q GtEq -> aq >= q Eq -> aq == q AbsLt -> abs aq < abs q AbsLtEq -> abs aq <= abs q AbsGt -> abs aq > abs q AbsGtEq -> abs aq >= abs q AbsEq -> abs aq == abs q -- | Does the match expression match this posting ? -- -- Note that for account match we try both original and effective account matchesPosting :: Query -> Posting -> Bool matchesPosting (Not q) p = not $ q `matchesPosting` p matchesPosting (Any) _ = True matchesPosting (None) _ = False matchesPosting (Or qs) p = any (`matchesPosting` p) qs matchesPosting (And qs) p = all (`matchesPosting` p) qs matchesPosting (Code r) p = regexMatchesCI r $ maybe "" (T.unpack . tcode) $ ptransaction p matchesPosting (Desc r) p = regexMatchesCI r $ maybe "" (T.unpack . tdescription) $ ptransaction p matchesPosting (Acct r) p = matchesPosting p || matchesPosting (originalPosting p) where matchesPosting p = regexMatchesCI r $ T.unpack $ paccount p -- XXX pack matchesPosting (Date span) p = span `spanContainsDate` postingDate p matchesPosting (Date2 span) p = span `spanContainsDate` postingDate2 p matchesPosting (Status Uncleared) p = postingStatus p /= Cleared matchesPosting (Status s) p = postingStatus p == s matchesPosting (Real v) p = v == isReal p matchesPosting q@(Depth _) Posting{paccount=a} = q `matchesAccount` a matchesPosting q@(Amt _ _) Posting{pamount=amt} = q `matchesMixedAmount` amt -- matchesPosting q@(Amt _ _) Posting{pamount=amt} = q `matchesMixedAmount` amt -- matchesPosting (Empty v) Posting{pamount=a} = v == isZeroMixedAmount a -- matchesPosting (Empty False) Posting{pamount=a} = True -- matchesPosting (Empty True) Posting{pamount=a} = isZeroMixedAmount a matchesPosting (Empty _) _ = True matchesPosting (Sym r) Posting{pamount=Mixed as} = any (regexMatchesCI $ "^" ++ r ++ "$") $ map (T.unpack . acommodity) as matchesPosting (Tag n v) p = not $ null $ matchedTags n v $ postingAllTags p -- matchesPosting _ _ = False tests_matchesPosting = [ "matchesPosting" ~: do -- matching posting status.. 
assertBool "positive match on cleared posting status" $ (Status Cleared) `matchesPosting` nullposting{pstatus=Cleared} assertBool "negative match on cleared posting status" $ not $ (Not $ Status Cleared) `matchesPosting` nullposting{pstatus=Cleared} assertBool "positive match on unclered posting status" $ (Status Uncleared) `matchesPosting` nullposting{pstatus=Uncleared} assertBool "negative match on unclered posting status" $ not $ (Not $ Status Uncleared) `matchesPosting` nullposting{pstatus=Uncleared} assertBool "positive match on true posting status acquired from transaction" $ (Status Cleared) `matchesPosting` nullposting{pstatus=Uncleared,ptransaction=Just nulltransaction{tstatus=Cleared}} assertBool "real:1 on real posting" $ (Real True) `matchesPosting` nullposting{ptype=RegularPosting} assertBool "real:1 on virtual posting fails" $ not $ (Real True) `matchesPosting` nullposting{ptype=VirtualPosting} assertBool "real:1 on balanced virtual posting fails" $ not $ (Real True) `matchesPosting` nullposting{ptype=BalancedVirtualPosting} assertBool "a" $ (Acct "'b") `matchesPosting` nullposting{paccount="'b"} assertBool "b" $ not $ (Tag "a" (Just "r$")) `matchesPosting` nullposting assertBool "c" $ (Tag "foo" Nothing) `matchesPosting` nullposting{ptags=[("foo","")]} assertBool "d" $ (Tag "foo" Nothing) `matchesPosting` nullposting{ptags=[("foo","baz")]} assertBool "e" $ (Tag "foo" (Just "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "f" $ not $ (Tag "foo" (Just "a$")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "g" $ not $ (Tag " foo " (Just "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "h" $ not $ (Tag "foo foo" (Just " ar ba ")) `matchesPosting` nullposting{ptags=[("foo foo","bar bar")]} -- a tag match on a posting also sees inherited tags assertBool "i" $ (Tag "txntag" Nothing) `matchesPosting` nullposting{ptransaction=Just nulltransaction{ttags=[("txntag","")]}} assertBool "j" $ not $ (Sym "$") `matchesPosting` nullposting{pamount=Mixed [usd 1]} -- becomes "^$$", ie testing for null symbol assertBool "k" $ (Sym "\\$") `matchesPosting` nullposting{pamount=Mixed [usd 1]} -- have to quote $ for regexpr assertBool "l" $ (Sym "shekels") `matchesPosting` nullposting{pamount=Mixed [amount{acommodity="shekels"}]} assertBool "m" $ not $ (Sym "shek") `matchesPosting` nullposting{pamount=Mixed [amount{acommodity="shekels"}]} ] -- | Does the match expression match this transaction ? 
matchesTransaction :: Query -> Transaction -> Bool matchesTransaction (Not q) t = not $ q `matchesTransaction` t matchesTransaction (Any) _ = True matchesTransaction (None) _ = False matchesTransaction (Or qs) t = any (`matchesTransaction` t) qs matchesTransaction (And qs) t = all (`matchesTransaction` t) qs matchesTransaction (Code r) t = regexMatchesCI r $ T.unpack $ tcode t matchesTransaction (Desc r) t = regexMatchesCI r $ T.unpack $ tdescription t matchesTransaction q@(Acct _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Date span) t = spanContainsDate span $ tdate t matchesTransaction (Date2 span) t = spanContainsDate span $ transactionDate2 t matchesTransaction (Status Uncleared) t = tstatus t /= Cleared matchesTransaction (Status s) t = tstatus t == s matchesTransaction (Real v) t = v == hasRealPostings t matchesTransaction q@(Amt _ _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Empty _) _ = True matchesTransaction (Depth d) t = any (Depth d `matchesPosting`) $ tpostings t matchesTransaction q@(Sym _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Tag n v) t = not $ null $ matchedTags n v $ transactionAllTags t -- matchesTransaction _ _ = False tests_matchesTransaction = [ "matchesTransaction" ~: do let q `matches` t = assertBool "" $ q `matchesTransaction` t Any `matches` nulltransaction assertBool "" $ not $ (Desc "x x") `matchesTransaction` nulltransaction{tdescription="x"} assertBool "" $ (Desc "x x") `matchesTransaction` nulltransaction{tdescription="x x"} -- see posting for more tag tests assertBool "" $ (Tag "foo" (Just "a")) `matchesTransaction` nulltransaction{ttags=[("foo","bar")]} -- a tag match on a transaction also matches posting tags assertBool "" $ (Tag "postingtag" Nothing) `matchesTransaction` nulltransaction{tpostings=[nullposting{ptags=[("postingtag","")]}]} ] -- | Filter a list of tags by matching against their names and -- optionally also their values. matchedTags :: Regexp -> Maybe Regexp -> [Tag] -> [Tag] matchedTags namepat valuepat tags = filter (match namepat valuepat) tags where match npat Nothing (n,_) = regexMatchesCI npat (T.unpack n) -- XXX match npat (Just vpat) (n,v) = regexMatchesCI npat (T.unpack n) && regexMatchesCI vpat (T.unpack v) -- tests tests_Hledger_Query :: Test tests_Hledger_Query = TestList $ tests_simplifyQuery ++ tests_words'' ++ tests_filterQuery ++ tests_parseQueryTerm ++ tests_parseAmountQueryTerm ++ tests_parseQuery ++ tests_matchesAccount ++ tests_matchesPosting ++ tests_matchesTransaction hledger-lib-1.2/Hledger/Read.hs0000644000000000000000000002615213067102102014477 0ustar0000000000000000{-| This is the entry point to hledger's reading system, which can read Journals from various data formats. Use this module if you want to parse journal data or read journal files. Generally it should not be necessary to import modules below this one. 
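A minimal usage sketch (eg in GHCi, with OverloadedStrings enabled; readJournal'
tries each built-in reader in turn and raises an error if none succeeds):

> j <- readJournal' "2017/1/1 opening\n  assets:cash  $1\n  equity\n"
> length (jtxns j)   -- 1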
-} {-# LANGUAGE ScopedTypeVariables, OverloadedStrings #-} module Hledger.Read ( -- * Journal files PrefixedFilePath, defaultJournal, defaultJournalPath, readJournalFiles, readJournalFile, requireJournalFileExists, ensureJournalFileExists, splitReaderPrefix, -- * Journal parsing readJournal, readJournal', -- * Re-exported JournalReader.accountaliasp, JournalReader.postingp, module Hledger.Read.Common, -- * Tests samplejournal, tests_Hledger_Read, ) where import Control.Applicative ((<|>)) import Control.Arrow (right) import qualified Control.Exception as C import Control.Monad.Except import Data.List import Data.Maybe import Data.Text (Text) import qualified Data.Text as T import Safe import System.Directory (doesFileExist, getHomeDirectory) import System.Environment (getEnv) import System.Exit (exitFailure) import System.FilePath ((), takeExtension) import System.IO (stderr) import Test.HUnit import Text.Printf import Hledger.Data.Dates (getCurrentDay) import Hledger.Data.Types import Hledger.Read.Common import qualified Hledger.Read.JournalReader as JournalReader -- import qualified Hledger.Read.LedgerReader as LedgerReader import qualified Hledger.Read.TimedotReader as TimedotReader import qualified Hledger.Read.TimeclockReader as TimeclockReader import qualified Hledger.Read.CsvReader as CsvReader import Hledger.Utils import Prelude hiding (getContents, writeFile) import Hledger.Utils.UTF8IOCompat (writeFile) journalEnvVar = "LEDGER_FILE" journalEnvVar2 = "LEDGER" journalDefaultFilename = ".hledger.journal" -- The available journal readers, each one handling a particular data format. readers :: [Reader] readers = [ JournalReader.reader ,TimeclockReader.reader ,TimedotReader.reader ,CsvReader.reader -- ,LedgerReader.reader ] readerNames :: [String] readerNames = map rFormat readers -- | A file path optionally prefixed by a reader name and colon -- (journal:, csv:, timedot:, etc.). type PrefixedFilePath = FilePath -- | Read the default journal file specified by the environment, or raise an error. defaultJournal :: IO Journal defaultJournal = defaultJournalPath >>= readJournalFile Nothing Nothing True >>= either error' return -- | Get the default journal file path specified by the environment. -- Like ledger, we look first for the LEDGER_FILE environment -- variable, and if that does not exist, for the legacy LEDGER -- environment variable. If neither is set, or the value is blank, -- return the hard-coded default, which is @.hledger.journal@ in the -- users's home directory (or in the current directory, if we cannot -- determine a home directory). defaultJournalPath :: IO String defaultJournalPath = do s <- envJournalPath if null s then defaultJournalPath else return s where envJournalPath = getEnv journalEnvVar `C.catch` (\(_::C.IOException) -> getEnv journalEnvVar2 `C.catch` (\(_::C.IOException) -> return "")) defaultJournalPath = do home <- getHomeDirectory `C.catch` (\(_::C.IOException) -> return "") return $ home journalDefaultFilename -- | @readJournalFiles mformat mrulesfile assrt prefixedfiles@ -- -- Read a Journal from each specified file path and combine them into one. -- Or, return the first error message. -- -- Combining Journals means concatenating them, basically. -- The parse state resets at the start of each file, which means that -- directives & aliases do not cross file boundaries. -- (The final parse state saved in the Journal does span all files, however.) 
-- -- As with readJournalFile, -- file paths can optionally have a READER: prefix, -- and the @mformat@, @mrulesfile, and @assrt@ arguments are supported -- (and these are applied to all files). -- readJournalFiles :: Maybe StorageFormat -> Maybe FilePath -> Bool -> [PrefixedFilePath] -> IO (Either String Journal) readJournalFiles mformat mrulesfile assrt prefixedfiles = do (right mconcat1 . sequence) <$> mapM (readJournalFile mformat mrulesfile assrt) prefixedfiles where mconcat1 :: Monoid t => [t] -> t mconcat1 [] = mempty mconcat1 x = foldr1 mappend x -- | @readJournalFile mformat mrulesfile assrt prefixedfile@ -- -- Read a Journal from this file, or from stdin if the file path is -, -- or return an error message. The file path can have a READER: prefix. -- -- The reader (data format) is chosen based on (in priority order): -- the @mformat@ argument; -- the file path's READER: prefix, if any; -- a recognised file name extension (in readJournal); -- if none of these identify a known reader, all built-in readers are tried in turn. -- -- A CSV conversion rules file (@mrulesfiles@) can be specified to help convert CSV data. -- -- Optionally, any balance assertions in the journal can be checked (@assrt@). -- readJournalFile :: Maybe StorageFormat -> Maybe FilePath -> Bool -> PrefixedFilePath -> IO (Either String Journal) readJournalFile mformat mrulesfile assrt prefixedfile = do let (mprefixformat, f) = splitReaderPrefix prefixedfile mfmt = mformat <|> mprefixformat requireJournalFileExists f readFileOrStdinAnyLineEnding f >>= readJournal mfmt mrulesfile assrt (Just f) -- | If a filepath is prefixed by one of the reader names and a colon, -- split that off. Eg "csv:-" -> (Just "csv", "-"). splitReaderPrefix :: PrefixedFilePath -> (Maybe String, FilePath) splitReaderPrefix f = headDef (Nothing, f) [(Just r, drop (length r + 1) f) | r <- readerNames, (r++":") `isPrefixOf` f] -- | If the specified journal file does not exist (and is not "-"), -- give a helpful error and quit. requireJournalFileExists :: FilePath -> IO () requireJournalFileExists "-" = return () requireJournalFileExists f = do exists <- doesFileExist f when (not exists) $ do -- XXX might not be a journal file hPrintf stderr "The hledger journal file \"%s\" was not found.\n" f hPrintf stderr "Please create it first, eg with \"hledger add\" or a text editor.\n" hPrintf stderr "Or, specify an existing journal file with -f or LEDGER_FILE.\n" exitFailure -- | Ensure there is a journal file at the given path, creating an empty one if needed. ensureJournalFileExists :: FilePath -> IO () ensureJournalFileExists f = do exists <- doesFileExist f when (not exists) $ do hPrintf stderr "Creating hledger journal file %s.\n" f -- note Hledger.Utils.UTF8.* do no line ending conversion on windows, -- we currently require unix line endings on all platforms. newJournalContent >>= writeFile f -- | Give the content for a new auto-created journal file. newJournalContent :: IO String newJournalContent = do d <- getCurrentDay return $ printf "; journal created %s by hledger\n" (show d) -- | Read a Journal from the given text trying all readers in turn, or throw an error. readJournal' :: Text -> IO Journal readJournal' t = readJournal Nothing Nothing True Nothing t >>= either error' return tests_readJournal' = [ "readJournal' parses sample journal" ~: do _ <- samplejournal assertBool "" True ] -- | @readJournal mformat mrulesfile assrt mfile txt@ -- -- Read a Journal from some text, or return an error message. 
-- -- The reader (data format) is chosen based on (in priority order): -- the @mformat@ argument; -- a recognised file name extension in @mfile@ (if provided). -- If none of these identify a known reader, all built-in readers are tried in turn -- (returning the first one's error message if none of them succeed). -- -- A CSV conversion rules file (@mrulesfiles@) can be specified to help convert CSV data. -- -- Optionally, any balance assertions in the journal can be checked (@assrt@). -- readJournal :: Maybe StorageFormat -> Maybe FilePath -> Bool -> Maybe FilePath -> Text -> IO (Either String Journal) readJournal mformat mrulesfile assrt mfile txt = let stablereaders = filter (not.rExperimental) readers rs = maybe stablereaders (:[]) $ findReader mformat mfile in tryReaders rs mrulesfile assrt mfile txt -- | @findReader mformat mpath@ -- -- Find the reader named by @mformat@, if provided. -- Or, if a file path is provided, find the first reader that handles -- its file extension, if any. findReader :: Maybe StorageFormat -> Maybe FilePath -> Maybe Reader findReader Nothing Nothing = Nothing findReader (Just fmt) _ = headMay [r | r <- readers, rFormat r == fmt] findReader Nothing (Just path) = case prefix of Just fmt -> headMay [r | r <- readers, rFormat r == fmt] Nothing -> headMay [r | r <- readers, ext `elem` rExtensions r] where (prefix,path') = splitReaderPrefix path ext = drop 1 $ takeExtension path' -- | @tryReaders readers mrulesfile assrt path t@ -- -- Try to parse the given text to a Journal using each reader in turn, -- returning the first success, or if all of them fail, the first error message. tryReaders :: [Reader] -> Maybe FilePath -> Bool -> Maybe FilePath -> Text -> IO (Either String Journal) tryReaders readers mrulesfile assrt path t = firstSuccessOrFirstError [] readers where firstSuccessOrFirstError :: [String] -> [Reader] -> IO (Either String Journal) firstSuccessOrFirstError [] [] = return $ Left "no readers found" firstSuccessOrFirstError errs (r:rs) = do dbg1IO "trying reader" (rFormat r) result <- (runExceptT . (rParser r) mrulesfile assrt path') t dbg1IO "reader result" $ either id show result case result of Right j -> return $ Right j -- success! Left e -> firstSuccessOrFirstError (errs++[e]) rs -- keep trying firstSuccessOrFirstError (e:_) [] = return $ Left e -- none left, return first error path' = fromMaybe "(string)" path -- tests samplejournal = readJournal' $ T.unlines ["2008/01/01 income" ," assets:bank:checking $1" ," income:salary" ,"" ,"comment" ,"multi line comment here" ,"for testing purposes" ,"end comment" ,"" ,"2008/06/01 gift" ," assets:bank:checking $1" ," income:gifts" ,"" ,"2008/06/02 save" ," assets:bank:saving $1" ," assets:bank:checking" ,"" ,"2008/06/03 * eat & shop" ," expenses:food $1" ," expenses:supplies $1" ," assets:cash" ,"" ,"2008/12/31 * pay off" ," liabilities:debts $1" ," assets:bank:checking" ] tests_Hledger_Read = TestList $ tests_readJournal' ++ [ JournalReader.tests_Hledger_Read_JournalReader, -- LedgerReader.tests_Hledger_Read_LedgerReader, TimeclockReader.tests_Hledger_Read_TimeclockReader, TimedotReader.tests_Hledger_Read_TimedotReader, CsvReader.tests_Hledger_Read_CsvReader, "journal" ~: do r <- runExceptT $ parseWithState mempty JournalReader.journalp "" assertBool "journalp should parse an empty file" (isRight $ r) jE <- readJournal Nothing Nothing True Nothing "" -- don't know how to get it from journal either error' (assertBool "journalp parsing an empty file should give an empty journal" . null . 
jtxns) jE ] hledger-lib-1.2/Hledger/Read/Common.hs0000644000000000000000000010016513066173044015740 0ustar0000000000000000--- * doc -- Lines beginning "--- *" are collapsible orgstruct nodes. Emacs users, -- (add-hook 'haskell-mode-hook -- (lambda () (set-variable 'orgstruct-heading-prefix-regexp "--- " t)) -- 'orgstruct-mode) -- and press TAB on nodes to expand/collapse. {-| Some common parsers and helpers used by several readers. Some of these might belong in Hledger.Read.JournalReader or Hledger.Read. -} --- * module {-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-} module Hledger.Read.Common where --- * imports import Prelude () import Prelude.Compat hiding (readFile) import Control.Monad.Compat import Control.Monad.Except (ExceptT(..), runExceptT, throwError) --, catchError) import Control.Monad.State.Strict import Data.Char (isNumber) import Data.Functor.Identity import Data.List.Compat import Data.List.NonEmpty (NonEmpty(..)) import Data.List.Split (wordsBy) import Data.Maybe import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import Safe import System.Time (getClockTime) import Text.Megaparsec hiding (parse,State) import Text.Megaparsec.Text import Hledger.Data import Hledger.Utils -- $setup --- * parsing utils -- | Run a string parser with no state in the identity monad. runTextParser, rtp :: TextParser Identity a -> Text -> Either (ParseError Char Dec) a runTextParser p t = runParser p "" t rtp = runTextParser -- | Run a journal parser with a null journal-parsing state. runJournalParser, rjp :: Monad m => TextParser m a -> Text -> m (Either (ParseError Char Dec) a) runJournalParser p t = runParserT p "" t rjp = runJournalParser -- | Run an error-raising journal parser with a null journal-parsing state. runErroringJournalParser, rejp :: Monad m => ErroringJournalParser m a -> Text -> m (Either String a) runErroringJournalParser p t = runExceptT $ runJournalParser (evalStateT p mempty) t >>= either (throwError . parseErrorPretty) return rejp = runErroringJournalParser genericSourcePos :: SourcePos -> GenericSourcePos genericSourcePos p = GenericSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p) (fromIntegral . unPos $ sourceColumn p) journalSourcePos :: SourcePos -> SourcePos -> GenericSourcePos journalSourcePos p p' = JournalSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p, fromIntegral $ line') where line' | (unPos $ sourceColumn p') == 1 = unPos (sourceLine p') - 1 | otherwise = unPos $ sourceLine p' -- might be at end of file withat last new-line -- | Given a megaparsec ParsedJournal parser, balance assertion flag, file -- path and file content: parse and post-process a Journal, or give an error. 
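--
-- A rough usage sketch (illustrative; readers such as
-- Hledger.Read.JournalReader pass their top-level parser here, and the
-- file name below is hypothetical):
--
-- > runExceptT $ parseAndFinaliseJournal journalp True "input.journal" txt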
parseAndFinaliseJournal :: ErroringJournalParser IO ParsedJournal -> Bool -> FilePath -> Text -> ExceptT String IO Journal parseAndFinaliseJournal parser assrt f txt = do t <- liftIO getClockTime y <- liftIO getCurrentYear ep <- runParserT (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt case ep of Right pj -> case journalFinalise t f txt assrt pj of Right j -> return j Left e -> throwError e Left e -> throwError $ parseErrorPretty e parseAndFinaliseJournal' :: JournalParser ParsedJournal -> Bool -> FilePath -> Text -> ExceptT String IO Journal parseAndFinaliseJournal' parser assrt f txt = do t <- liftIO getClockTime y <- liftIO getCurrentYear let ep = runParser (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt case ep of Right pj -> case journalFinalise t f txt assrt pj of Right j -> return j Left e -> throwError e Left e -> throwError $ parseErrorPretty e setYear :: Year -> JournalStateParser m () setYear y = modify' (\j -> j{jparsedefaultyear=Just y}) getYear :: JournalStateParser m (Maybe Year) getYear = fmap jparsedefaultyear get setDefaultCommodityAndStyle :: (CommoditySymbol,AmountStyle) -> JournalStateParser m () setDefaultCommodityAndStyle cs = modify' (\j -> j{jparsedefaultcommodity=Just cs}) getDefaultCommodityAndStyle :: JournalStateParser m (Maybe (CommoditySymbol,AmountStyle)) getDefaultCommodityAndStyle = jparsedefaultcommodity `fmap` get pushAccount :: AccountName -> JournalStateParser m () pushAccount acct = modify' (\j -> j{jaccounts = acct : jaccounts j}) pushParentAccount :: AccountName -> JournalStateParser m () pushParentAccount acct = modify' (\j -> j{jparseparentaccounts = acct : jparseparentaccounts j}) popParentAccount :: JournalStateParser m () popParentAccount = do j <- get case jparseparentaccounts j of [] -> unexpected (Tokens ('E' :| "nd of apply account block with no beginning")) (_:rest) -> put j{jparseparentaccounts=rest} getParentAccount :: JournalStateParser m AccountName getParentAccount = fmap (concatAccountNames . reverse . jparseparentaccounts) get addAccountAlias :: MonadState Journal m => AccountAlias -> m () addAccountAlias a = modify' (\(j@Journal{..}) -> j{jparsealiases=a:jparsealiases}) getAccountAliases :: MonadState Journal m => m [AccountAlias] getAccountAliases = fmap jparsealiases get clearAccountAliases :: MonadState Journal m => m () clearAccountAliases = modify' (\(j@Journal{..}) -> j{jparsealiases=[]}) -- getTransactionCount :: MonadState Journal m => m Integer -- getTransactionCount = fmap jparsetransactioncount get -- -- setTransactionCount :: MonadState Journal m => Integer -> m () -- setTransactionCount i = modify' (\j -> j{jparsetransactioncount=i}) -- -- -- | Increment the transaction index by one and return the new value. -- incrementTransactionCount :: MonadState Journal m => m Integer -- incrementTransactionCount = do -- modify' (\j -> j{jparsetransactioncount=jparsetransactioncount j + 1}) -- getTransactionCount journalAddFile :: (FilePath,Text) -> Journal -> Journal journalAddFile f j@Journal{jfiles=fs} = j{jfiles=fs++[f]} -- append, unlike the other fields, even though we do a final reverse, -- to compensate for additional reversal due to including/monoid-concatting -- -- | Terminate parsing entirely, returning the given error message -- -- with the current parse position prepended. 
-- parserError :: String -> ErroringJournalParser a -- parserError s = do -- pos <- getPosition -- parserErrorAt pos s -- | Terminate parsing entirely, returning the given error message -- with the given parse position prepended. parserErrorAt :: Monad m => SourcePos -> String -> ErroringJournalParser m a parserErrorAt pos s = throwError $ sourcePosPretty pos ++ ":\n" ++ s --- * parsers --- ** transaction bits statusp :: TextParser m ClearedStatus statusp = choice' [ many spacenonewline >> char '*' >> return Cleared , many spacenonewline >> char '!' >> return Pending , return Uncleared ] "cleared status" codep :: TextParser m String codep = try (do { some spacenonewline; char '(' "codep"; anyChar `manyTill` char ')' } ) <|> return "" descriptionp :: JournalStateParser m String descriptionp = many (noneOf (";\n" :: [Char])) --- ** dates -- | Parse a date in YYYY/MM/DD format. -- Hyphen (-) and period (.) are also allowed as separators. -- The year may be omitted if a default year has been set. -- Leading zeroes may be omitted. datep :: JournalStateParser m Day datep = do -- hacky: try to ensure precise errors for invalid dates -- XXX reported error position is not too good -- pos <- genericSourcePos <$> getPosition datestr <- do c <- digitChar cs <- lift $ many $ choice' [digitChar, datesepchar] return $ c:cs let sepchars = nub $ sort $ filter (`elem` datesepchars) datestr when (length sepchars /= 1) $ fail $ "bad date, different separators used: " ++ datestr let dateparts = wordsBy (`elem` datesepchars) datestr currentyear <- getYear [y,m,d] <- case (dateparts,currentyear) of ([m,d],Just y) -> return [show y,m,d] ([_,_],Nothing) -> fail $ "partial date "++datestr++" found, but the current year is unknown" ([y,m,d],_) -> return [y,m,d] _ -> fail $ "bad date: " ++ datestr let maybedate = fromGregorianValid (read y) (read m) (read d) case maybedate of Nothing -> fail $ "bad date: " ++ datestr Just date -> return date "full or partial date" -- | Parse a date and time in YYYY/MM/DD HH:MM[:SS][+-ZZZZ] format. -- Hyphen (-) and period (.) are also allowed as date separators. -- The year may be omitted if a default year has been set. -- Seconds are optional. -- The timezone is optional and ignored (the time is always interpreted as a local time). -- Leading zeroes may be omitted (except in a timezone). datetimep :: JournalStateParser m LocalTime datetimep = do day <- datep lift $ some spacenonewline h <- some digitChar let h' = read h guard $ h' >= 0 && h' <= 23 char ':' m <- some digitChar let m' = read m guard $ m' >= 0 && m' <= 59 s <- optional $ char ':' >> some digitChar let s' = case s of Just sstr -> read sstr Nothing -> 0 guard $ s' >= 0 && s' <= 59 {- tz <- -} optional $ do plusminus <- oneOf ("-+" :: [Char]) d1 <- digitChar d2 <- digitChar d3 <- digitChar d4 <- digitChar return $ plusminus:d1:d2:d3:d4:"" -- ltz <- liftIO $ getCurrentTimeZone -- let tz' = maybe ltz (fromMaybe ltz . 
parseTime defaultTimeLocale "%z") tz -- return $ localTimeToUTC tz' $ LocalTime day $ TimeOfDay h' m' (fromIntegral s') return $ LocalTime day $ TimeOfDay h' m' (fromIntegral s') secondarydatep :: Day -> JournalStateParser m Day secondarydatep primarydate = do char '=' -- kludgy way to use primary date for default year let withDefaultYear d p = do y <- getYear let (y',_,_) = toGregorian d in setYear y' r <- p when (isJust y) $ setYear $ fromJust y -- XXX -- mapM setYear <$> y return r withDefaultYear primarydate datep -- | -- >> parsewith twoorthreepartdatestringp "2016/01/2" -- Right "2016/01/2" -- twoorthreepartdatestringp = do -- n1 <- some digitChar -- c <- datesepchar -- n2 <- some digitChar -- mn3 <- optional $ char c >> some digitChar -- return $ n1 ++ c:n2 ++ maybe "" (c:) mn3 --- ** account names -- | Parse an account name, then apply any parent account prefix and/or account aliases currently in effect. modifiedaccountnamep :: JournalStateParser m AccountName modifiedaccountnamep = do parent <- getParentAccount aliases <- getAccountAliases a <- lift accountnamep return $ accountNameApplyAliases aliases $ -- XXX accountNameApplyAliasesMemo ? doesn't seem to make a difference joinAccountNames parent a -- | Parse an account name. Account names start with a non-space, may -- have single spaces inside them, and are terminated by two or more -- spaces (or end of input). Also they have one or more components of -- at least one character, separated by the account separator char. -- (This parser will also consume one following space, if present.) accountnamep :: TextParser m AccountName accountnamep = do astr <- do c <- nonspace cs <- striptrailingspace <$> many (nonspace <|> singlespace) return $ c:cs let a = T.pack astr when (accountNameFromComponents (accountNameComponents a) /= a) (fail $ "account name seems ill-formed: "++astr) return a where singlespace = try (do {spacenonewline; do {notFollowedBy spacenonewline; return ' '}}) striptrailingspace "" = "" striptrailingspace s = if last s == ' ' then init s else s -- accountnamechar = notFollowedBy (oneOf "()[]") >> nonspace -- "account name character (non-bracket, non-parenthesis, non-whitespace)" --- ** amounts -- | Parse whitespace then an amount, with an optional left or right -- currency symbol and optional price, or return the special -- "missing" marker amount. spaceandamountormissingp :: Monad m => JournalStateParser m MixedAmount spaceandamountormissingp = try (do lift $ some spacenonewline (Mixed . (:[])) `fmap` amountp <|> return missingmixedamt ) <|> return missingmixedamt #ifdef TESTS assertParseEqual' :: (Show a, Eq a) => (Either ParseError a) -> a -> Assertion assertParseEqual' parse expected = either (assertFailure.show) (`is'` expected) parse is' :: (Eq a, Show a) => a -> a -> Assertion a `is'` e = assertEqual e a test_spaceandamountormissingp = do assertParseEqual' (parseWithState mempty spaceandamountormissingp " $47.18") (Mixed [usd 47.18]) assertParseEqual' (parseWithState mempty spaceandamountormissingp "$47.18") missingmixedamt assertParseEqual' (parseWithState mempty spaceandamountormissingp " ") missingmixedamt assertParseEqual' (parseWithState mempty spaceandamountormissingp "") missingmixedamt #endif -- | Parse a single-commodity amount, with optional symbol on the left or -- right, optional unit or total price, and optional (ignored) -- ledger-style balance assertion or fixed lot price declaration. 
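--
-- Illustrative inputs (taken from the tests below), which can also be tried
-- with the pure helper 'amountp'' defined further down:
--
-- > amountp' "$47.18"
-- > amountp' "$10 @ €0.5"   -- with a unit price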
amountp :: Monad m => JournalStateParser m Amount amountp = try leftsymbolamountp <|> try rightsymbolamountp <|> nosymbolamountp #ifdef TESTS test_amountp = do assertParseEqual' (parseWithState mempty amountp "$47.18") (usd 47.18) assertParseEqual' (parseWithState mempty amountp "$1.") (usd 1 `withPrecision` 0) -- ,"amount with unit price" ~: do assertParseEqual' (parseWithState mempty amountp "$10 @ €0.5") (usd 10 `withPrecision` 0 `at` (eur 0.5 `withPrecision` 1)) -- ,"amount with total price" ~: do assertParseEqual' (parseWithState mempty amountp "$10 @@ €5") (usd 10 `withPrecision` 0 @@ (eur 5 `withPrecision` 0)) #endif -- | Parse an amount from a string, or get an error. amountp' :: String -> Amount amountp' s = case runParser (evalStateT (amountp <* eof) mempty) "" (T.pack s) of Right amt -> amt Left err -> error' $ show err -- XXX should throwError -- | Parse a mixed amount from a string, or get an error. mamountp' :: String -> MixedAmount mamountp' = Mixed . (:[]) . amountp' signp :: TextParser m String signp = do sign <- optional $ oneOf ("+-" :: [Char]) return $ case sign of Just '-' -> "-" _ -> "" leftsymbolamountp :: Monad m => JournalStateParser m Amount leftsymbolamountp = do sign <- lift signp c <- lift commoditysymbolp sp <- lift $ many spacenonewline (q,prec,mdec,mgrps) <- lift numberp let s = amountstyle{ascommodityside=L, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} p <- priceamountp let applysign = if sign=="-" then negate else id return $ applysign $ Amount c q p s "left-symbol amount" rightsymbolamountp :: Monad m => JournalStateParser m Amount rightsymbolamountp = do (q,prec,mdec,mgrps) <- lift numberp sp <- lift $ many spacenonewline c <- lift commoditysymbolp p <- priceamountp let s = amountstyle{ascommodityside=R, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} return $ Amount c q p s "right-symbol amount" nosymbolamountp :: Monad m => JournalStateParser m Amount nosymbolamountp = do (q,prec,mdec,mgrps) <- lift numberp p <- priceamountp -- apply the most recently seen default commodity and style to this commodityless amount defcs <- getDefaultCommodityAndStyle let (c,s) = case defcs of Just (defc,defs) -> (defc, defs{asprecision=max (asprecision defs) prec}) Nothing -> ("", amountstyle{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}) return $ Amount c q p s "no-symbol amount" commoditysymbolp :: TextParser m CommoditySymbol commoditysymbolp = (quotedcommoditysymbolp <|> simplecommoditysymbolp) "commodity symbol" quotedcommoditysymbolp :: TextParser m CommoditySymbol quotedcommoditysymbolp = do char '"' s <- some $ noneOf (";\n\"" :: [Char]) char '"' return $ T.pack s simplecommoditysymbolp :: TextParser m CommoditySymbol simplecommoditysymbolp = T.pack <$> some (noneOf nonsimplecommoditychars) priceamountp :: Monad m => JournalStateParser m Price priceamountp = try (do lift (many spacenonewline) char '@' try (do char '@' lift (many spacenonewline) a <- amountp -- XXX can parse more prices ad infinitum, shouldn't return $ TotalPrice a) <|> (do lift (many spacenonewline) a <- amountp -- XXX can parse more prices ad infinitum, shouldn't return $ UnitPrice a)) <|> return NoPrice partialbalanceassertionp :: Monad m => JournalStateParser m (Maybe Amount) partialbalanceassertionp = try (do lift (many spacenonewline) char '=' lift (many spacenonewline) a <- amountp -- XXX should restrict to a simple amount return $ Just $ a) <|> return Nothing -- balanceassertion :: 
Monad m => TextParser m (Maybe MixedAmount) -- balanceassertion = -- try (do -- lift (many spacenonewline) -- string "==" -- lift (many spacenonewline) -- a <- amountp -- XXX should restrict to a simple amount -- return $ Just $ Mixed [a]) -- <|> return Nothing -- http://ledger-cli.org/3.0/doc/ledger3.html#Fixing-Lot-Prices fixedlotpricep :: Monad m => JournalStateParser m (Maybe Amount) fixedlotpricep = try (do lift (many spacenonewline) char '{' lift (many spacenonewline) char '=' lift (many spacenonewline) a <- amountp -- XXX should restrict to a simple amount lift (many spacenonewline) char '}' return $ Just a) <|> return Nothing -- | Parse a string representation of a number for its value and display -- attributes. -- -- Some international number formats are accepted, eg either period or comma -- may be used for the decimal point, and the other of these may be used for -- separating digit groups in the integer part. See -- http://en.wikipedia.org/wiki/Decimal_separator for more examples. -- -- This returns: the parsed numeric value, the precision (number of digits -- seen following the decimal point), the decimal point character used if any, -- and the digit group style if any. -- numberp :: TextParser m (Quantity, Int, Maybe Char, Maybe DigitGroupStyle) numberp = do -- a number is an optional sign followed by a sequence of digits possibly -- interspersed with periods, commas, or both -- ptrace "numberp" sign <- signp parts <- some $ choice' [some digitChar, some $ char ',', some $ char '.'] dbg8 "numberp parsed" (sign,parts) `seq` return () -- check the number is well-formed and identify the decimal point and digit -- group separator characters used, if any let (numparts, puncparts) = partition numeric parts (ok, mdecimalpoint, mseparator) = case (numparts, puncparts) of ([],_) -> (False, Nothing, Nothing) -- no digits, not ok (_,[]) -> (True, Nothing, Nothing) -- digits with no punctuation, ok (_,[[d]]) -> (True, Just d, Nothing) -- just a single punctuation of length 1, assume it's a decimal point (_,[_]) -> (False, Nothing, Nothing) -- a single punctuation of some other length, not ok (_,_:_:_) -> -- two or more punctuations let (s:ss, d) = (init puncparts, last puncparts) -- the leftmost is a separator and the rightmost may be a decimal point in if any ((/=1).length) puncparts -- adjacent punctuation chars, not ok || any (s/=) ss -- separator chars vary, not ok || head parts == s -- number begins with a separator char, not ok then (False, Nothing, Nothing) else if s == d then (True, Nothing, Just $ head s) -- just one kind of punctuation - must be separators else (True, Just $ head d, Just $ head s) -- separator(s) and a decimal point unless ok $ fail $ "number seems ill-formed: "++concat parts -- get the digit group sizes and digit group style if any let (intparts',fracparts') = span ((/= mdecimalpoint) . Just . 
head) parts (intparts, fracpart) = (filter numeric intparts', filter numeric fracparts') groupsizes = reverse $ case map length intparts of (a:b:cs) | a < b -> b:cs gs -> gs mgrps = (`DigitGroups` groupsizes) <$> mseparator -- put the parts back together without digit group separators, get the precision and parse the value let int = concat $ "":intparts frac = concat $ "":fracpart precision = length frac int' = if null int then "0" else int frac' = if null frac then "0" else frac quantity = read $ sign++int'++"."++frac' -- this read should never fail return $ dbg8 "numberp quantity,precision,mdecimalpoint,mgrps" (quantity,precision,mdecimalpoint,mgrps) "numberp" where numeric = isNumber . headDef '_' -- test_numberp = do -- let s `is` n = assertParseEqual (parseWithState mempty numberp s) n -- assertFails = assertBool . isLeft . parseWithState mempty numberp -- assertFails "" -- "0" `is` (0, 0, '.', ',', []) -- "1" `is` (1, 0, '.', ',', []) -- "1.1" `is` (1.1, 1, '.', ',', []) -- "1,000.1" `is` (1000.1, 1, '.', ',', [3]) -- "1.00.000,1" `is` (100000.1, 1, ',', '.', [3,2]) -- "1,000,000" `is` (1000000, 0, '.', ',', [3,3]) -- "1." `is` (1, 0, '.', ',', []) -- "1," `is` (1, 0, ',', '.', []) -- ".1" `is` (0.1, 1, '.', ',', []) -- ",1" `is` (0.1, 1, ',', '.', []) -- assertFails "1,000.000,1" -- assertFails "1.000,000.1" -- assertFails "1,000.000.1" -- assertFails "1,,1" -- assertFails "1..1" -- assertFails ".1," -- assertFails ",1." --- ** comments multilinecommentp :: JournalStateParser m () multilinecommentp = do string "comment" >> lift (many spacenonewline) >> newline go where go = try (eof <|> (string "end comment" >> newline >> return ())) <|> (anyLine >> go) anyLine = anyChar `manyTill` newline emptyorcommentlinep :: JournalStateParser m () emptyorcommentlinep = do lift (many spacenonewline) >> (commentp <|> (lift (many spacenonewline) >> newline >> return "")) return () -- | Parse a possibly multi-line comment following a semicolon. followingcommentp :: JournalStateParser m Text followingcommentp = -- ptrace "followingcommentp" do samelinecomment <- lift (many spacenonewline) >> (try semicoloncommentp <|> (newline >> return "")) newlinecomments <- many (try (lift (some spacenonewline) >> semicoloncommentp)) return $ T.unlines $ samelinecomment:newlinecomments -- | Parse a possibly multi-line comment following a semicolon, and -- any tags and/or posting dates within it. Posting dates can be -- expressed with "date"/"date2" tags and/or bracketed dates. The -- dates are parsed in full here so that errors are reported in the -- right position. Missing years can be inferred if a default date is -- provided. -- -- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; a:b, date:3/4, [=5/6]" -- Right ("a:b, date:3/4, [=5/6]\n",[("a","b"),("date","3/4")],Just 2000-03-04,Just 2000-05-06) -- -- Year unspecified and no default provided -> unknown year error, at correct position: -- >>> rejp (followingcommentandtagsp Nothing) " ; xxx date:3/4\n ; second line" -- Left ...1:22...partial date 3/4 found, but the current year is unknown... -- -- Date tag value contains trailing text - forgot the comma, confused: -- the syntaxes ? 
We'll accept the leading date anyway -- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; date:3/4=5/6" -- Right ("date:3/4=5/6\n",[("date","3/4=5/6")],Just 2000-03-04,Nothing) -- followingcommentandtagsp :: MonadIO m => Maybe Day -> ErroringJournalParser m (Text, [Tag], Maybe Day, Maybe Day) followingcommentandtagsp mdefdate = do -- pdbg 0 "followingcommentandtagsp" -- Parse a single or multi-line comment, starting on this line or the next one. -- Save the starting position and preserve all whitespace for the subsequent re-parsing, -- to get good error positions. startpos <- getPosition commentandwhitespace :: String <- do let semicoloncommentp' = (:) <$> char ';' <*> anyChar `manyTill` eolof sp1 <- lift (many spacenonewline) l1 <- try (lift semicoloncommentp') <|> (newline >> return "") ls <- lift . many $ try ((++) <$> some spacenonewline <*> semicoloncommentp') return $ unlines $ (sp1 ++ l1) : ls let comment = T.pack $ unlines $ map (lstrip . dropWhile (==';') . strip) $ lines commentandwhitespace -- pdbg 0 $ "commentws:"++show commentandwhitespace -- pdbg 0 $ "comment:"++show comment -- Reparse the comment for any tags. tags <- case runTextParser (setPosition startpos >> tagsp) $ T.pack commentandwhitespace of Right ts -> return ts Left e -> throwError $ parseErrorPretty e -- pdbg 0 $ "tags: "++show tags -- Reparse the comment for any posting dates. Use the transaction date for defaults, if provided. epdates <- liftIO $ rejp (setPosition startpos >> postingdatesp mdefdate) $ T.pack commentandwhitespace pdates <- case epdates of Right ds -> return ds Left e -> throwError e -- pdbg 0 $ "pdates: "++show pdates let mdate = headMay $ map snd $ filter ((=="date").fst) pdates mdate2 = headMay $ map snd $ filter ((=="date2").fst) pdates return (comment, tags, mdate, mdate2) commentp :: JournalStateParser m Text commentp = commentStartingWithp commentchars commentchars :: [Char] commentchars = "#;*" semicoloncommentp :: JournalStateParser m Text semicoloncommentp = commentStartingWithp ";" commentStartingWithp :: [Char] -> JournalStateParser m Text commentStartingWithp cs = do -- ptrace "commentStartingWith" oneOf cs lift (many spacenonewline) l <- anyChar `manyTill` (lift eolof) optional newline return $ T.pack l --- ** tags -- | Extract any tags (name:value ended by comma or newline) embedded in a string. -- -- >>> commentTags "a b:, c:c d:d, e" -- [("b",""),("c","c d:d")] -- -- >>> commentTags "a [1/1/1] [1/1] [1], [=1/1/1] [=1/1] [=1] [1/1=1/1/1] [1=1/1/1] b:c" -- [("b","c")] -- -- --[("date","1/1/1"),("date","1/1"),("date2","1/1/1"),("date2","1/1"),("date","1/1"),("date2","1/1/1"),("date","1"),("date2","1/1/1")] -- -- >>> commentTags "\na b:, \nd:e, f" -- [("b",""),("d","e")] -- commentTags :: Text -> [Tag] commentTags s = case runTextParser tagsp s of Right r -> r Left _ -> [] -- shouldn't happen -- | Parse all tags found in a string. tagsp :: Parser [Tag] tagsp = -- do -- pdbg 0 $ "tagsp" many (try (nontagp >> tagp)) -- | Parse everything up till the first tag. -- -- >>> rtp nontagp "\na b:, \nd:e, f" -- Right "\na " nontagp :: Parser String nontagp = -- do -- pdbg 0 "nontagp" -- anyChar `manyTill` (lookAhead (try (tagorbracketeddatetagsp Nothing >> return ()) <|> eof)) anyChar `manyTill` lookAhead (try (void tagp) <|> eof) -- XXX costly ? -- | Tags begin with a colon-suffixed tag name (a word beginning with -- a letter) and are followed by a tag value (any text up to a comma -- or newline, whitespace-stripped). 
-- -- >>> rtp tagp "a:b b , c AuxDate: 4/2" -- Right ("a","b b") -- tagp :: Parser Tag tagp = do -- pdbg 0 "tagp" n <- tagnamep v <- tagvaluep return (n,v) -- | -- >>> rtp tagnamep "a:" -- Right "a" tagnamep :: Parser Text tagnamep = -- do -- pdbg 0 "tagnamep" T.pack <$> some (noneOf (": \t\n" :: [Char])) <* char ':' tagvaluep :: TextParser m Text tagvaluep = do -- ptrace "tagvalue" v <- anyChar `manyTill` (void (try (char ',')) <|> eolof) return $ T.pack $ strip $ reverse $ dropWhile (==',') $ reverse $ strip v --- ** posting dates -- | Parse all posting dates found in a string. Posting dates can be -- expressed with date/date2 tags and/or bracketed dates. The dates -- are parsed fully to give useful errors. Missing years can be -- inferred only if a default date is provided. -- postingdatesp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName,Day)] postingdatesp mdefdate = do -- pdbg 0 $ "postingdatesp" let p = ((:[]) <$> datetagp mdefdate) <|> bracketeddatetagsp mdefdate nonp = many (notFollowedBy p >> anyChar) -- anyChar `manyTill` (lookAhead (try (p >> return ()) <|> eof)) concat <$> many (try (nonp >> p)) --- ** date tags -- | Date tags are tags with name "date" or "date2". Their value is -- parsed as a date, using the provided default date if any for -- inferring a missing year if needed. Any error in date parsing is -- reported and terminates parsing. -- -- >>> rejp (datetagp Nothing) "date: 2000/1/2 " -- Right ("date",2000-01-02) -- -- >>> rejp (datetagp (Just $ fromGregorian 2001 2 3)) "date2:3/4" -- Right ("date2",2001-03-04) -- -- >>> rejp (datetagp Nothing) "date: 3/4" -- Left ...1:9...partial date 3/4 found, but the current year is unknown... -- datetagp :: Monad m => Maybe Day -> ErroringJournalParser m (TagName,Day) datetagp mdefdate = do -- pdbg 0 "datetagp" string "date" n <- T.pack . fromMaybe "" <$> optional (string "2") char ':' startpos <- getPosition v <- lift tagvaluep -- re-parse value as a date. j <- get let ep :: Either (ParseError Char Dec) Day ep = parseWithState' j{jparsedefaultyear=first3.toGregorian <$> mdefdate} -- The value extends to a comma, newline, or end of file. -- It seems like ignoring any extra stuff following a date -- gives better errors here. (do setPosition startpos datep) -- <* eof) v case ep of Left e -> throwError $ parseErrorPretty e Right d -> return ("date"<>n, d) --- ** bracketed dates -- tagorbracketeddatetagsp :: Monad m => Maybe Day -> TextParser u m [Tag] -- tagorbracketeddatetagsp mdefdate = -- bracketeddatetagsp mdefdate <|> ((:[]) <$> tagp) -- | Parse Ledger-style bracketed posting dates ([DATE=DATE2]), as -- "date" and/or "date2" tags. Anything that looks like an attempt at -- this (a square-bracketed sequence of 0123456789/-.= containing at -- least one digit and one date separator) is also parsed, and will -- throw an appropriate error. -- -- The dates are parsed in full here so that errors are reported in -- the right position. A missing year in DATE can be inferred if a -- default date is provided. A missing year in DATE2 will be inferred -- from DATE. -- -- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/2=3/4]" -- Right [("date",2016-01-02),("date2",2016-03-04)] -- -- >>> rejp (bracketeddatetagsp Nothing) "[1]" -- Left ...not a bracketed date... -- -- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/32]" -- Left ...1:11:...bad date: 2016/1/32... -- -- >>> rejp (bracketeddatetagsp Nothing) "[1/31]" -- Left ...1:6:...partial date 1/31 found, but the current year is unknown... 
-- -- >>> rejp (bracketeddatetagsp Nothing) "[0123456789/-.=/-.=]" -- Left ...1:15:...bad date, different separators... -- bracketeddatetagsp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName, Day)] bracketeddatetagsp mdefdate = do -- pdbg 0 "bracketeddatetagsp" char '[' startpos <- getPosition let digits = "0123456789" s <- some (oneOf $ '=':digits++datesepchars) char ']' unless (any (`elem` s) digits && any (`elem` datesepchars) s) $ fail "not a bracketed date" -- looks sufficiently like a bracketed date, now we -- re-parse as dates and throw any errors j <- get let ep :: Either (ParseError Char Dec) (Maybe Day, Maybe Day) ep = parseWithState' j{jparsedefaultyear=first3.toGregorian <$> mdefdate} (do setPosition startpos md1 <- optional datep maybe (return ()) (setYear.first3.toGregorian) md1 md2 <- optional $ char '=' >> datep eof return (md1,md2) ) (T.pack s) case ep of Left e -> throwError $ parseErrorPretty e Right (md1,md2) -> return $ catMaybes [("date",) <$> md1, ("date2",) <$> md2] hledger-lib-1.2/Hledger/Read/CsvReader.hs0000644000000000000000000007756313066173044016405 0ustar0000000000000000{-# LANGUAGE CPP #-} {-| A reader for CSV data, using an extra rules file to help interpret the data. -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TypeSynonymInstances #-} {-# LANGUAGE FlexibleInstances #-} module Hledger.Read.CsvReader ( -- * Reader reader, -- * Misc. CsvRecord, -- rules, rulesFileFor, parseRulesFile, parseAndValidateCsvRules, expandIncludes, transactionFromCsvRecord, -- * Tests tests_Hledger_Read_CsvReader ) where import Prelude () import Prelude.Compat hiding (getContents) import Control.Exception hiding (try) import Control.Monad import Control.Monad.Except import Control.Monad.State.Strict (StateT, get, modify', evalStateT) -- import Test.HUnit import Data.Char (toLower, isDigit, isSpace) import Data.List.Compat import Data.Maybe import Data.Ord import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar (Day) #if MIN_VERSION_time(1,5,0) import Data.Time.Format (parseTimeM, defaultTimeLocale) #else import Data.Time.Format (parseTime) import System.Locale (defaultTimeLocale) #endif import Safe import System.Directory (doesFileExist) import System.FilePath import System.IO (stderr) import Test.HUnit hiding (State) import Text.CSV (parseCSV, CSV) import Text.Megaparsec hiding (parse, State) import Text.Megaparsec.Text import qualified Text.Parsec as Parsec import Text.Printf (hPrintf,printf) import Hledger.Data import Hledger.Utils.UTF8IOCompat (getContents) import Hledger.Utils import Hledger.Read.Common (amountp, statusp, genericSourcePos) reader :: Reader reader = Reader {rFormat = "csv" ,rExtensions = ["csv"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from CSV data, or give an error. -- XXX currently ignores the string and reads from the file path parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse rulesfile _ f t = do r <- liftIO $ readJournalFromCsv rulesfile f t case r of Left e -> throwError e Right j -> return $ journalNumberAndTieTransactions j -- XXX does not use parseAndFinaliseJournal like the other readers -- | Read a Journal from the given CSV data (and filename, used for error -- messages), or return an error. Proceed as follows: -- @ -- 1. 
parse CSV conversion rules from the specified rules file, or from -- the default rules file for the specified CSV file, if it exists, -- or throw a parse error; if it doesn't exist, use built-in default rules -- 2. parse the CSV data, or throw a parse error -- 3. convert the CSV records to transactions using the rules -- 4. if the rules file didn't exist, create it with the default rules and filename -- 5. return the transactions as a Journal -- @ readJournalFromCsv :: Maybe FilePath -> FilePath -> Text -> IO (Either String Journal) readJournalFromCsv Nothing "-" _ = return $ Left "please use --rules-file when reading CSV from stdin" readJournalFromCsv mrulesfile csvfile csvdata = handle (\e -> return $ Left $ show (e :: IOException)) $ do let throwerr = throw.userError -- parse rules let rulesfile = fromMaybe (rulesFileFor csvfile) mrulesfile rulesfileexists <- doesFileExist rulesfile rulestext <- if rulesfileexists then do hPrintf stderr "using conversion rules file %s\n" rulesfile liftIO $ (readFile' rulesfile >>= expandIncludes (takeDirectory rulesfile)) else return $ defaultRulesText rulesfile rules <- liftIO (runExceptT $ parseAndValidateCsvRules rulesfile rulestext) >>= either throwerr return dbg2IO "rules" rules -- apply skip directive let skip = maybe 0 oneorerror $ getDirective "skip" rules where oneorerror "" = 1 oneorerror s = readDef (throwerr $ "could not parse skip value: " ++ show s) s -- parse csv -- parsec seems to fail if you pass it "-" here XXX try again with megaparsec let parsecfilename = if csvfile == "-" then "(stdin)" else csvfile records <- (either throwerr id . dbg2 "validateCsv" . validateCsv skip . dbg2 "parseCsv") `fmap` parseCsv parsecfilename (T.unpack csvdata) dbg1IO "first 3 csv records" $ take 3 records -- identify header lines -- let (headerlines, datalines) = identifyHeaderLines records -- mfieldnames = lastMay headerlines -- convert to transactions and return as a journal let txns = snd $ mapAccumL (\pos r -> (pos, transactionFromCsvRecord (let SourcePos name line col = pos in SourcePos name (unsafePos $ unPos line + 1) col) rules r)) (initialPos parsecfilename) records -- heuristic: if the records appear to have been in reverse date order, -- reverse them all as well as doing a txn date sort, -- so that same-day txns' original order is preserved txns' | length txns > 1 && tdate (head txns) > tdate (last txns) = reverse txns | otherwise = txns when (not rulesfileexists) $ do hPrintf stderr "created default conversion rules file %s, edit this for better results\n" rulesfile writeFile rulesfile $ T.unpack rulestext return $ Right nulljournal{jtxns=sortBy (comparing tdate) txns'} parseCsv :: FilePath -> String -> IO (Either Parsec.ParseError CSV) parseCsv path csvdata = case path of "-" -> liftM (parseCSV "(stdin)") getContents _ -> return $ parseCSV path csvdata -- | Return the cleaned up and validated CSV data, or an error. 
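--
-- For example (an illustrative sketch; the first argument is the number
-- of header lines to skip):
--
-- > validateCsv 1 (Right [["date","amount"],["2017/1/1","10"]])
-- >   == Right [["2017/1/1","10"]]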
validateCsv :: Int -> Either Parsec.ParseError CSV -> Either String [CsvRecord] validateCsv _ (Left e) = Left $ show e validateCsv numhdrlines (Right rs) = validate $ drop numhdrlines $ filternulls rs where filternulls = filter (/=[""]) validate [] = Left "no CSV records found" validate rs@(first:_) | isJust lessthan2 = let r = fromJust lessthan2 in Left $ printf "CSV record %s has less than two fields" (show r) | isJust different = let r = fromJust different in Left $ printf "the first CSV record %s has %d fields but %s has %d" (show first) length1 (show r) (length r) | otherwise = Right rs where length1 = length first lessthan2 = headMay $ filter ((<2).length) rs different = headMay $ filter ((/=length1).length) rs -- -- | The highest (0-based) field index referenced in the field -- -- definitions, or -1 if no fields are defined. -- maxFieldIndex :: CsvRules -> Int -- maxFieldIndex r = maximumDef (-1) $ catMaybes [ -- dateField r -- ,statusField r -- ,codeField r -- ,amountField r -- ,amountInField r -- ,amountOutField r -- ,currencyField r -- ,accountField r -- ,account2Field r -- ,date2Field r -- ] -- rulesFileFor :: CliOpts -> FilePath -> FilePath -- rulesFileFor CliOpts{rules_file_=Just f} _ = f -- rulesFileFor CliOpts{rules_file_=Nothing} csvfile = replaceExtension csvfile ".rules" rulesFileFor :: FilePath -> FilePath rulesFileFor = (++ ".rules") csvFileFor :: FilePath -> FilePath csvFileFor = reverse . drop 6 . reverse defaultRulesText :: FilePath -> Text defaultRulesText csvfile = T.pack $ unlines ["# hledger csv conversion rules for " ++ csvFileFor (takeFileName csvfile) ,"# cf http://hledger.org/manual#csv-files" ,"" ,"account1 assets:bank:checking" ,"" ,"fields date, description, amount" ,"" ,"#skip 1" ,"" ,"#date-format %-d/%-m/%Y" ,"#date-format %-m/%-d/%Y" ,"#date-format %Y-%h-%d" ,"" ,"#currency $" ,"" ,"if ITUNES" ," account2 expenses:entertainment" ,"" ,"if (TO|FROM) SAVINGS" ," account2 assets:bank:savings\n" ] -------------------------------------------------------------------------------- -- Conversion rules parsing {- Grammar for the CSV conversion rules, more or less: RULES: RULE* RULE: ( FIELD-LIST | FIELD-ASSIGNMENT | CONDITIONAL-BLOCK | SKIP | DATE-FORMAT | COMMENT | BLANK ) NEWLINE FIELD-LIST: fields SPACE FIELD-NAME ( SPACE? , SPACE? FIELD-NAME )* FIELD-NAME: QUOTED-FIELD-NAME | BARE-FIELD-NAME QUOTED-FIELD-NAME: " (any CHAR except double-quote)+ " BARE-FIELD-NAME: any CHAR except space, tab, #, ; FIELD-ASSIGNMENT: JOURNAL-FIELD ASSIGNMENT-SEPARATOR FIELD-VALUE JOURNAL-FIELD: date | date2 | status | code | description | comment | account1 | account2 | amount | JOURNAL-PSEUDO-FIELD JOURNAL-PSEUDO-FIELD: amount-in | amount-out | currency ASSIGNMENT-SEPARATOR: SPACE | ( : SPACE? ) FIELD-VALUE: VALUE (possibly containing CSV-FIELD-REFERENCEs) CSV-FIELD-REFERENCE: % CSV-FIELD CSV-FIELD: ( FIELD-NAME | FIELD-NUMBER ) (corresponding to a CSV field) FIELD-NUMBER: DIGIT+ CONDITIONAL-BLOCK: if ( FIELD-MATCHER NEWLINE )+ INDENTED-BLOCK FIELD-MATCHER: ( CSV-FIELD-NAME SPACE? )? ( MATCHOP SPACE? )? PATTERNS MATCHOP: ~ PATTERNS: ( NEWLINE REGEXP )* REGEXP INDENTED-BLOCK: ( SPACE ( FIELD-ASSIGNMENT | COMMENT ) NEWLINE )+ REGEXP: ( NONSPACE CHAR* ) SPACE? VALUE: SPACE? ( CHAR* ) SPACE? COMMENT: SPACE? COMMENT-CHAR VALUE COMMENT-CHAR: # | ; NONSPACE: any CHAR not a SPACE-CHAR BLANK: SPACE? 
SPACE: SPACE-CHAR+ SPACE-CHAR: space | tab CHAR: any character except newline DIGIT: 0-9 -} {- | A set of data definitions and account-matching patterns sufficient to convert a particular CSV data file into meaningful journal transactions. -} data CsvRules = CsvRules { rdirectives :: [(DirectiveName,String)], rcsvfieldindexes :: [(CsvFieldName, CsvFieldIndex)], rassignments :: [(JournalFieldName, FieldTemplate)], rconditionalblocks :: [ConditionalBlock] } deriving (Show, Eq) type CsvRulesParser a = StateT CsvRules Parser a type DirectiveName = String type CsvFieldName = String type CsvFieldIndex = Int type JournalFieldName = String type FieldTemplate = String type ConditionalBlock = ([RecordMatcher], [(JournalFieldName, FieldTemplate)]) -- block matches if all RecordMatchers match type RecordMatcher = [RegexpPattern] -- match if any regexps match any of the csv fields -- type FieldMatcher = (CsvFieldName, [RegexpPattern]) -- match if any regexps match this csv field type DateFormat = String type RegexpPattern = String rules = CsvRules { rdirectives=[], rcsvfieldindexes=[], rassignments=[], rconditionalblocks=[] } addDirective :: (DirectiveName, String) -> CsvRules -> CsvRules addDirective d r = r{rdirectives=d:rdirectives r} addAssignment :: (JournalFieldName, FieldTemplate) -> CsvRules -> CsvRules addAssignment a r = r{rassignments=a:rassignments r} setIndexesAndAssignmentsFromList :: [CsvFieldName] -> CsvRules -> CsvRules setIndexesAndAssignmentsFromList fs r = addAssignmentsFromList fs . setCsvFieldIndexesFromList fs $ r setCsvFieldIndexesFromList :: [CsvFieldName] -> CsvRules -> CsvRules setCsvFieldIndexesFromList fs r = r{rcsvfieldindexes=zip fs [1..]} addAssignmentsFromList :: [CsvFieldName] -> CsvRules -> CsvRules addAssignmentsFromList fs r = foldl' maybeAddAssignment r journalfieldnames where maybeAddAssignment rules f = (maybe id addAssignmentFromIndex $ elemIndex f fs) rules where addAssignmentFromIndex i = addAssignment (f, "%"++show (i+1)) addConditionalBlock :: ConditionalBlock -> CsvRules -> CsvRules addConditionalBlock b r = r{rconditionalblocks=b:rconditionalblocks r} getDirective :: DirectiveName -> CsvRules -> Maybe FieldTemplate getDirective directivename = lookup directivename . rdirectives instance ShowErrorComponent String where showErrorComponent = id -- | An error-throwing action that parses this file's content -- as CSV conversion rules, interpolating any included files first, -- and runs some extra validation checks. parseRulesFile :: FilePath -> ExceptT String IO CsvRules parseRulesFile f = liftIO (readFile' f >>= expandIncludes (takeDirectory f)) >>= parseAndValidateCsvRules f -- | Look for hledger rules file-style include directives in this text, -- and interpolate the included files, recursively. -- Included file paths may be relative to the directory of the -- provided file path. -- This is a cheap hack to avoid rewriting the CSV rules parser. expandIncludes :: FilePath -> T.Text -> IO T.Text expandIncludes basedir content = do let (ls,rest) = break (T.isPrefixOf "include") $ T.lines content case rest of [] -> return $ T.unlines ls ((T.stripPrefix "include" -> Just f):ls') -> do let f' = basedir dropWhile isSpace (T.unpack f) basedir' = takeDirectory f' included <- readFile' f' >>= expandIncludes basedir' return $ T.unlines [T.unlines ls, included, T.unlines ls'] ls' -> return $ T.unlines $ ls ++ ls' -- should never get here -- | An error-throwing action that parses this text as CSV conversion rules -- and runs some extra validation checks. 
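--
-- A rough usage sketch (the rules file name and rules text are hypothetical):
--
-- > runExceptT $ parseAndValidateCsvRules "bank.csv.rules" rulestext
--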
The file path is for error messages. parseAndValidateCsvRules :: FilePath -> T.Text -> ExceptT String IO CsvRules parseAndValidateCsvRules rulesfile s = do let rules = parseCsvRules rulesfile s case rules of Left e -> ExceptT $ return $ Left $ parseErrorPretty e Right r -> do r_ <- liftIO $ runExceptT $ validateRules r ExceptT $ case r_ of Left e -> return $ Left $ parseErrorPretty $ toParseError e Right r -> return $ Right r where toParseError :: forall s. Ord s => s -> ParseError Char s toParseError s = (mempty :: ParseError Char s) { errorCustom = S.singleton s} -- | Parse this text as CSV conversion rules. The file path is for error messages. parseCsvRules :: FilePath -> T.Text -> Either (ParseError Char Dec) CsvRules -- parseCsvRules rulesfile s = runParser csvrulesfile nullrules{baseAccount=takeBaseName rulesfile} rulesfile s parseCsvRules rulesfile s = runParser (evalStateT rulesp rules) rulesfile s -- | Return the validated rules, or an error. validateRules :: CsvRules -> ExceptT String IO CsvRules validateRules rules = do unless (isAssigned "date") $ ExceptT $ return $ Left "Please specify (at top level) the date field. Eg: date %1\n" unless ((amount && not (amountin || amountout)) || (not amount && (amountin && amountout))) $ ExceptT $ return $ Left "Please specify (at top level) either the amount field, or both the amount-in and amount-out fields. Eg: amount %2\n" ExceptT $ return $ Right rules where amount = isAssigned "amount" amountin = isAssigned "amount-in" amountout = isAssigned "amount-out" isAssigned f = isJust $ getEffectiveAssignment rules [] f -- parsers rulesp :: CsvRulesParser CsvRules rulesp = do many $ choiceInState [blankorcommentlinep "blank or comment line" ,(directivep >>= modify' . addDirective) "directive" ,(fieldnamelistp >>= modify' . setIndexesAndAssignmentsFromList) "field name list" ,(fieldassignmentp >>= modify' . addAssignment) "field assignment" ,(conditionalblockp >>= modify' . 
addConditionalBlock) "conditional block" ] eof r <- get return r{rdirectives=reverse $ rdirectives r ,rassignments=reverse $ rassignments r ,rconditionalblocks=reverse $ rconditionalblocks r } blankorcommentlinep :: CsvRulesParser () blankorcommentlinep = lift (pdbg 3 "trying blankorcommentlinep") >> choiceInState [blanklinep, commentlinep] blanklinep :: CsvRulesParser () blanklinep = lift (many spacenonewline) >> newline >> return () "blank line" commentlinep :: CsvRulesParser () commentlinep = lift (many spacenonewline) >> commentcharp >> lift restofline >> return () "comment line" commentcharp :: CsvRulesParser Char commentcharp = oneOf (";#*" :: [Char]) directivep :: CsvRulesParser (DirectiveName, String) directivep = (do lift $ pdbg 3 "trying directive" d <- choiceInState $ map string directives v <- (((char ':' >> lift (many spacenonewline)) <|> lift (some spacenonewline)) >> directivevalp) <|> (optional (char ':') >> lift (many spacenonewline) >> lift eolof >> return "") return (d,v) ) "directive" directives = ["date-format" -- ,"default-account1" -- ,"default-currency" -- ,"skip-lines" -- old ,"skip" -- ,"base-account" -- ,"base-currency" ] directivevalp :: CsvRulesParser String directivevalp = anyChar `manyTill` lift eolof fieldnamelistp :: CsvRulesParser [CsvFieldName] fieldnamelistp = (do lift $ pdbg 3 "trying fieldnamelist" string "fields" optional $ char ':' lift (some spacenonewline) let separator = lift (many spacenonewline) >> char ',' >> lift (many spacenonewline) f <- fromMaybe "" <$> optional fieldnamep fs <- some $ (separator >> fromMaybe "" <$> optional fieldnamep) lift restofline return $ map (map toLower) $ f:fs ) "field name list" fieldnamep :: CsvRulesParser String fieldnamep = quotedfieldnamep <|> barefieldnamep quotedfieldnamep :: CsvRulesParser String quotedfieldnamep = do char '"' f <- some $ noneOf ("\"\n:;#~" :: [Char]) char '"' return f barefieldnamep :: CsvRulesParser String barefieldnamep = some $ noneOf (" \t\n,;#~" :: [Char]) fieldassignmentp :: CsvRulesParser (JournalFieldName, FieldTemplate) fieldassignmentp = do lift $ pdbg 3 "trying fieldassignmentp" f <- journalfieldnamep assignmentseparatorp v <- fieldvalp return (f,v) "field assignment" journalfieldnamep :: CsvRulesParser String journalfieldnamep = lift (pdbg 2 "trying journalfieldnamep") >> choiceInState (map string journalfieldnames) journalfieldnames = [-- pseudo fields: "amount-in" ,"amount-out" ,"currency" -- standard fields: ,"date2" ,"date" ,"status" ,"code" ,"description" ,"amount" ,"account1" ,"account2" ,"comment" ] assignmentseparatorp :: CsvRulesParser () assignmentseparatorp = do lift $ pdbg 3 "trying assignmentseparatorp" choice [ -- try (lift (many spacenonewline) >> oneOf ":="), try (lift (many spacenonewline) >> char ':'), spaceChar ] _ <- lift (many spacenonewline) return () fieldvalp :: CsvRulesParser String fieldvalp = do lift $ pdbg 2 "trying fieldvalp" anyChar `manyTill` lift eolof conditionalblockp :: CsvRulesParser ConditionalBlock conditionalblockp = do lift $ pdbg 3 "trying conditionalblockp" string "if" >> lift (many spacenonewline) >> optional newline ms <- some recordmatcherp as <- many (lift (some spacenonewline) >> fieldassignmentp) when (null as) $ fail "start of conditional block found, but no assignment rules afterward\n(assignment rules in a conditional block should be indented)\n" return (ms, as) "conditional block" recordmatcherp :: CsvRulesParser [String] recordmatcherp = do lift $ pdbg 2 "trying recordmatcherp" -- pos <- currentPos _ <- optional 
(matchoperatorp >> lift (many spacenonewline) >> optional newline) ps <- patternsp when (null ps) $ fail "start of record matcher found, but no patterns afterward\n(patterns should not be indented)\n" return ps "record matcher" matchoperatorp :: CsvRulesParser String matchoperatorp = choiceInState $ map string ["~" -- ,"!~" -- ,"=" -- ,"!=" ] patternsp :: CsvRulesParser [String] patternsp = do lift $ pdbg 3 "trying patternsp" ps <- many regexp return ps regexp :: CsvRulesParser String regexp = do lift $ pdbg 3 "trying regexp" notFollowedBy matchoperatorp c <- lift nonspace cs <- anyChar `manyTill` lift eolof return $ strip $ c:cs -- fieldmatcher = do -- pdbg 2 "trying fieldmatcher" -- f <- fromMaybe "all" `fmap` (optional $ do -- f' <- fieldname -- lift (many spacenonewline) -- return f') -- char '~' -- lift (many spacenonewline) -- ps <- patterns -- let r = "(" ++ intercalate "|" ps ++ ")" -- return (f,r) -- "field matcher" -------------------------------------------------------------------------------- -- Converting CSV records to journal transactions type CsvRecord = [String] -- Convert a CSV record to a transaction using the rules, or raise an -- error if the data can not be parsed. transactionFromCsvRecord :: SourcePos -> CsvRules -> CsvRecord -> Transaction transactionFromCsvRecord sourcepos rules record = t where mdirective = (`getDirective` rules) mfieldtemplate = getEffectiveAssignment rules record render = renderTemplate rules record mskip = mdirective "skip" mdefaultcurrency = mdirective "default-currency" mparsedate = parseDateWithFormatOrDefaultFormats (mdirective "date-format") -- render each field using its template and the csv record, and -- in some cases parse the rendered string (eg dates and amounts) mdateformat = mdirective "date-format" date = render $ fromMaybe "" $ mfieldtemplate "date" date' = fromMaybe (error' $ dateerror "date" date mdateformat) $ mparsedate date mdate2 = maybe Nothing (Just . render) $ mfieldtemplate "date2" mdate2' = maybe Nothing (maybe (error' $ dateerror "date2" (fromMaybe "" mdate2) mdateformat) Just . mparsedate) mdate2 dateerror datefield value mdateformat = unlines ["error: could not parse \""++value++"\" as a date using date format "++maybe "\"YYYY/M/D\", \"YYYY-M-D\" or \"YYYY.M.D\"" show mdateformat ,"the CSV record is: "++intercalate ", " (map show record) ,"the "++datefield++" rule is: "++(fromMaybe "required, but missing" $ mfieldtemplate datefield) ,"the date-format is: "++fromMaybe "unspecified" mdateformat ,"you may need to " ++"change your "++datefield++" rule, " ++maybe "add a" (const "change your") mdateformat++" date-format rule, " ++"or "++maybe "add a" (const "change your") mskip++" skip rule" ,"for m/d/y or d/m/y dates, use date-format %-m/%-d/%Y or date-format %-d/%-m/%Y" ] status = case mfieldtemplate "status" of Nothing -> Uncleared Just str -> either statuserror id . runParser (statusp <* eof) "" . T.pack $ render str where statuserror err = error' $ unlines ["error: could not parse \""++str++"\" as a cleared status (should be *, ! or empty)" ,"the parse error is: "++show err ] code = maybe "" render $ mfieldtemplate "code" description = maybe "" render $ mfieldtemplate "description" comment = maybe "" render $ mfieldtemplate "comment" precomment = maybe "" render $ mfieldtemplate "precomment" currency = maybe (fromMaybe "" mdefaultcurrency) render $ mfieldtemplate "currency" amountstr = (currency++) $ negateIfParenthesised $ getAmountStr rules record amount = either amounterror (Mixed . 
(:[])) $ runParser (evalStateT (amountp <* eof) mempty) "" $ T.pack amountstr amounterror err = error' $ unlines ["error: could not parse \""++amountstr++"\" as an amount" ,showRecord record ,"the amount rule is: "++(fromMaybe "" $ mfieldtemplate "amount") ,"the currency rule is: "++(fromMaybe "unspecified" $ mfieldtemplate "currency") ,"the default-currency is: "++fromMaybe "unspecified" mdefaultcurrency ,"the parse error is: "++show err ,"you may need to " ++"change your amount or currency rules, " ++"or "++maybe "add a" (const "change your") mskip++" skip rule" ] -- Using costOfMixedAmount here to allow complex costs like "10 GBP @@ 15 USD". -- Aim is to have "10 GBP @@ 15 USD" applied to account2, but have "-15USD" applied to account1 amount1 = costOfMixedAmount amount amount2 = (-amount) s `or` def = if null s then def else s defaccount1 = fromMaybe "unknown" $ mdirective "default-account1" defaccount2 = case isNegativeMixedAmount amount2 of Just True -> "income:unknown" _ -> "expenses:unknown" account1 = T.pack $ maybe "" render (mfieldtemplate "account1") `or` defaccount1 account2 = T.pack $ maybe "" render (mfieldtemplate "account2") `or` defaccount2 -- build the transaction t = nulltransaction{ tsourcepos = genericSourcePos sourcepos, tdate = date', tdate2 = mdate2', tstatus = status, tcode = T.pack code, tdescription = T.pack description, tcomment = T.pack comment, tpreceding_comment_lines = T.pack precomment, tpostings = [posting {paccount=account2, pamount=amount2, ptransaction=Just t} ,posting {paccount=account1, pamount=amount1, ptransaction=Just t} ] } getAmountStr :: CsvRules -> CsvRecord -> String getAmountStr rules record = let mamount = getEffectiveAssignment rules record "amount" mamountin = getEffectiveAssignment rules record "amount-in" mamountout = getEffectiveAssignment rules record "amount-out" render = fmap (strip . renderTemplate rules record) in case (render mamount, render mamountin, render mamountout) of (Just "", Nothing, Nothing) -> error' $ "amount has no value\n"++showRecord record (Just a, Nothing, Nothing) -> a (Nothing, Just "", Just "") -> error' $ "neither amount-in or amount-out has a value\n"++showRecord record (Nothing, Just i, Just "") -> i (Nothing, Just "", Just o) -> negateStr o (Nothing, Just _, Just _) -> error' $ "both amount-in and amount-out have a value\n"++showRecord record _ -> error' $ "found values for amount and for amount-in/amount-out - please use either amount or amount-in/amount-out\n"++showRecord record negateIfParenthesised :: String -> String negateIfParenthesised ('(':s) | lastMay s == Just ')' = negateStr $ init s negateIfParenthesised s = s negateStr :: String -> String negateStr ('-':s) = s negateStr s = '-':s -- | Show a (approximate) recreation of the original CSV record. showRecord :: CsvRecord -> String showRecord r = "the CSV record is: "++intercalate ", " (map show r) -- | Given the conversion rules, a CSV record and a journal entry field name, find -- the template value ultimately assigned to this field, either at top -- level or in a matching conditional block. Conditional blocks' -- patterns are matched against an approximation of the original CSV -- record: all the field values with commas intercalated. 
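--
-- A rough sketch (hypothetical rules and record): with a top-level rule
-- @account2 expenses:unknown@ and a conditional block @if SAVINGS@ that
-- assigns @account2 assets:bank:savings@, a record containing "SAVINGS"
-- gets the conditional assignment, since later (conditional) assignments win:
--
-- > getEffectiveAssignment rules ["2017/1/1","TO SAVINGS","10"] "account2"
-- >   == Just "assets:bank:savings"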
getEffectiveAssignment :: CsvRules -> CsvRecord -> JournalFieldName -> Maybe FieldTemplate getEffectiveAssignment rules record f = lastMay $ assignmentsFor f where assignmentsFor f = map snd $ filter ((==f).fst) $ toplevelassignments ++ conditionalassignments where toplevelassignments = rassignments rules conditionalassignments = concatMap snd $ filter blockMatches $ blocksAssigning f where blocksAssigning f = filter (any ((==f).fst) . snd) $ rconditionalblocks rules blockMatches :: ConditionalBlock -> Bool blockMatches (matchers,_) = all matcherMatches matchers where matcherMatches :: RecordMatcher -> Bool -- matcherMatches pats = any patternMatches pats matcherMatches pats = patternMatches $ "(" ++ intercalate "|" pats ++ ")" where patternMatches :: RegexpPattern -> Bool patternMatches pat = regexMatchesCI pat csvline where csvline = intercalate "," record renderTemplate :: CsvRules -> CsvRecord -> FieldTemplate -> String renderTemplate rules record t = regexReplaceBy "%[A-z0-9]+" replace t where replace ('%':pat) = maybe pat (\i -> atDef "" record (i-1)) mi where mi | all isDigit pat = readMay pat | otherwise = lookup pat $ rcsvfieldindexes rules replace pat = pat -- Parse the date string using the specified date-format, or if unspecified try these default formats: -- YYYY/MM/DD, YYYY-MM-DD, YYYY.MM.DD, MM/DD/YYYY (month and day can be 1 or 2 digits, year must be 4). parseDateWithFormatOrDefaultFormats :: Maybe DateFormat -> String -> Maybe Day parseDateWithFormatOrDefaultFormats mformat s = firstJust $ map parsewith formats where parsetime = #if MIN_VERSION_time(1,5,0) parseTimeM True #else parseTime #endif parsewith = flip (parsetime defaultTimeLocale) s formats = maybe ["%Y/%-m/%-d" ,"%Y-%-m-%-d" ,"%Y.%-m.%-d" -- ,"%-m/%-d/%Y" -- ,parseTime defaultTimeLocale "%Y/%m/%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTime defaultTimeLocale "%Y-%m-%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTime defaultTimeLocale "%m/%e/%Y" ('0':s) -- ,parseTime defaultTimeLocale "%m-%e-%Y" ('0':s) ] (:[]) mformat -------------------------------------------------------------------------------- -- tests tests_Hledger_Read_CsvReader = TestList (test_parser) -- ++ test_description_parsing) -- test_description_parsing = [ -- "description-field 1" ~: assertParseDescription "description-field 1\n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field 1 " ~: assertParseDescription "description-field 1 \n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field %(1)" ~: assertParseDescription "description-field %(1)\n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field %(1)/$(2)" ~: assertParseDescription "description-field %(1)/%(2)\n" [ -- FormatField False Nothing Nothing (FieldNo 1) -- , FormatLiteral "/" -- , FormatField False Nothing Nothing (FieldNo 2) -- ] -- ] -- where -- assertParseDescription string expected = do assertParseEqual (parseDescription string) (rules {descriptionField = expected}) -- parseDescription :: String -> Either ParseError CsvRules -- parseDescription x = runParser descriptionfieldWrapper rules "(unknown)" x -- descriptionfieldWrapper :: GenParser Char CsvRules CsvRules -- descriptionfieldWrapper = do -- descriptionfield -- r <- getState -- return r test_parser = [ "convert rules parsing: empty file" ~: do -- let assertMixedAmountParse parseresult mixedamount = -- (either (const "parse error") showMixedAmountDebug parseresult) ~?= (showMixedAmountDebug mixedamount) assertParseEqual (parseCsvRules "unknown" "") rules -- ,"convert rules 
parsing: accountrule" ~: do -- assertParseEqual (parseWithState rules accountrule "A\na\n") -- leading blank line required -- ([("A",Nothing)], "a") ,"convert rules parsing: trailing comments" ~: do assertParse (parseWithState' rules rulesp "skip\n# \n#\n") ,"convert rules parsing: trailing blank lines" ~: do assertParse (parseWithState' rules rulesp "skip\n\n \n") ,"convert rules parsing: empty field value" ~: do assertParse (parseWithState' rules rulesp "account1 \nif foo\n account2 foo\n") -- not supported -- ,"convert rules parsing: no final newline" ~: do -- assertParse (parseWithState rules csvrulesfile "A\na") -- assertParse (parseWithState rules csvrulesfile "A\na\n# \n#") -- assertParse (parseWithState rules csvrulesfile "A\na\n\n ") -- (rules{ -- -- dateField=Maybe FieldPosition, -- -- statusField=Maybe FieldPosition, -- -- codeField=Maybe FieldPosition, -- -- descriptionField=Maybe FieldPosition, -- -- amountField=Maybe FieldPosition, -- -- currencyField=Maybe FieldPosition, -- -- baseCurrency=Maybe String, -- -- baseAccount=AccountName, -- accountRules=[ -- ([("A",Nothing)], "a") -- ] -- }) ] hledger-lib-1.2/Hledger/Read/JournalReader.hs0000644000000000000000000006113613066173044017251 0ustar0000000000000000--- * doc -- Lines beginning "--- *" are collapsible orgstruct nodes. Emacs users, -- (add-hook 'haskell-mode-hook -- (lambda () (set-variable 'orgstruct-heading-prefix-regexp "--- " t)) -- 'orgstruct-mode) -- and press TAB on nodes to expand/collapse. {-| A reader for hledger's journal file format (). hledger's journal format is a compatible subset of c++ ledger's (), so this reader should handle many ledger files as well. Example: @ 2012\/3\/24 gift expenses:gifts $10 assets:cash @ Journal format supports the include directive which can read files in other formats, so the other file format readers need to be importable here. Some low-level journal syntax parsers which those readers also use are therefore defined separately in Hledger.Read.Common, avoiding import cycles. 
-} --- * module {-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-} module Hledger.Read.JournalReader ( --- * exports -- * Reader reader, -- * Parsing utils genericSourcePos, parseAndFinaliseJournal, runJournalParser, rjp, runErroringJournalParser, rejp, -- * Parsers used elsewhere getParentAccount, journalp, directivep, defaultyeardirectivep, marketpricedirectivep, datetimep, datep, -- codep, -- accountnamep, modifiedaccountnamep, postingp, -- amountp, -- amountp', -- mamountp', -- numberp, statusp, emptyorcommentlinep, followingcommentp, accountaliasp -- * Tests ,tests_Hledger_Read_JournalReader ) where --- * imports import Prelude () import Prelude.Compat hiding (readFile) import qualified Control.Exception as C import Control.Monad import Control.Monad.Except (ExceptT(..), runExceptT, throwError) import Control.Monad.State.Strict import qualified Data.Map.Strict as M import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import Safe import Test.HUnit #ifdef TESTS import Test.Framework import Text.Megaparsec.Error #endif import Text.Megaparsec hiding (parse) import Text.Printf import System.FilePath import Hledger.Data import Hledger.Read.Common import Hledger.Read.TimeclockReader (timeclockfilep) import Hledger.Read.TimedotReader (timedotfilep) import Hledger.Utils -- $setup -- >>> :set -XOverloadedStrings --- * reader reader :: Reader reader = Reader {rFormat = "journal" ,rExtensions = ["journal", "j", "hledger", "ledger"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from hledger's journal file -- format, or give an error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal journalp --- * parsers --- ** journal -- | A journal parser. Accumulates and returns a "ParsedJournal", -- which should be finalised/validated before use. -- -- >>> rejp (journalp <* eof) "2015/1/1\n a 0\n" -- Right Journal with 1 transactions, 1 accounts -- journalp :: MonadIO m => ErroringJournalParser m ParsedJournal journalp = do many addJournalItemP eof get -- | A side-effecting parser; parses any kind of journal item -- and updates the parse state accordingly. addJournalItemP :: MonadIO m => ErroringJournalParser m () addJournalItemP = -- all journal line types can be distinguished by the first -- character, can use choice without backtracking choice [ directivep , transactionp >>= modify' . addTransaction , modifiertransactionp >>= modify' . addModifierTransaction , periodictransactionp >>= modify' . addPeriodicTransaction , marketpricedirectivep >>= modify' . addMarketPrice , void emptyorcommentlinep , void multilinecommentp ] "transaction or directive" --- ** directives -- | Parse any journal directive and update the parse state accordingly. -- Cf http://hledger.org/manual.html#directives, -- http://ledger-cli.org/3.0/doc/ledger3.html#Command-Directives directivep :: MonadIO m => ErroringJournalParser m () directivep = (do optional $ char '!' 
choiceInState [ includedirectivep ,aliasdirectivep ,endaliasesdirectivep ,accountdirectivep ,applyaccountdirectivep ,commoditydirectivep ,endapplyaccountdirectivep ,tagdirectivep ,endtagdirectivep ,defaultyeardirectivep ,defaultcommoditydirectivep ,commodityconversiondirectivep ,ignoredpricecommoditydirectivep ] ) "directive" includedirectivep :: MonadIO m => ErroringJournalParser m () includedirectivep = do string "include" lift (some spacenonewline) filename <- lift restofline parentpos <- getPosition parentj <- get let childj = newJournalWithParseStateFrom parentj (ej :: Either String ParsedJournal) <- liftIO $ runExceptT $ do let curdir = takeDirectory (sourceName parentpos) filepath <- expandPath curdir filename `orRethrowIOError` (show parentpos ++ " locating " ++ filename) txt <- readFileAnyLineEnding filepath `orRethrowIOError` (show parentpos ++ " reading " ++ filepath) (ej1::Either (ParseError Char Dec) ParsedJournal) <- runParserT (evalStateT (choiceInState [journalp ,timeclockfilep ,timedotfilep -- can't include a csv file yet, that reader is special ]) childj) filepath txt either (throwError . ((show parentpos ++ " in included file " ++ show filename ++ ":\n") ++) . show) (return . journalAddFile (filepath, txt)) ej1 case ej of Left e -> throwError e Right childj -> modify' (\parentj -> childj <> parentj) -- discard child's parse info, prepend its (reversed) list data, combine other fields newJournalWithParseStateFrom :: Journal -> Journal newJournalWithParseStateFrom j = mempty{ jparsedefaultyear = jparsedefaultyear j ,jparsedefaultcommodity = jparsedefaultcommodity j ,jparseparentaccounts = jparseparentaccounts j ,jparsealiases = jparsealiases j -- ,jparsetransactioncount = jparsetransactioncount j ,jparsetimeclockentries = jparsetimeclockentries j } -- | Lift an IO action into the exception monad, rethrowing any IO -- error with the given message prepended. orRethrowIOError :: IO a -> String -> ExceptT String IO a orRethrowIOError io msg = ExceptT $ (Right <$> io) `C.catch` \(e::C.IOException) -> return $ Left $ printf "%s:\n%s" msg (show e) accountdirectivep :: JournalStateParser m () accountdirectivep = do string "account" lift (some spacenonewline) acct <- lift accountnamep newline many indentedlinep modify' (\j -> j{jaccounts = acct : jaccounts j}) indentedlinep :: JournalStateParser m String indentedlinep = lift (some spacenonewline) >> (rstrip <$> lift restofline) -- | Parse a one-line or multi-line commodity directive. -- -- >>> Right _ <- rejp commoditydirectivep "commodity $1.00" -- >>> Right _ <- rejp commoditydirectivep "commodity $\n format $1.00" -- >>> Right _ <- rejp commoditydirectivep "commodity $\n\n" -- a commodity with no format -- >>> Right _ <- rejp commoditydirectivep "commodity $1.00\n format $1.00" -- both, what happens ? commoditydirectivep :: Monad m => ErroringJournalParser m () commoditydirectivep = try commoditydirectiveonelinep <|> commoditydirectivemultilinep -- | Parse a one-line commodity directive. 
-- -- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00" -- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00 ; blah\n" commoditydirectiveonelinep :: Monad m => JournalStateParser m () commoditydirectiveonelinep = do string "commodity" lift (some spacenonewline) Amount{acommodity,astyle} <- amountp lift (many spacenonewline) _ <- followingcommentp <|> (lift eolof >> return "") let comm = Commodity{csymbol=acommodity, cformat=Just astyle} modify' (\j -> j{jcommodities=M.insert acommodity comm $ jcommodities j}) -- | Parse a multi-line commodity directive, containing 0 or more format subdirectives. -- -- >>> Right _ <- rejp commoditydirectivemultilinep "commodity $ ; blah \n format $1.00 ; blah" commoditydirectivemultilinep :: Monad m => ErroringJournalParser m () commoditydirectivemultilinep = do string "commodity" lift (some spacenonewline) sym <- lift commoditysymbolp _ <- followingcommentp <|> (lift eolof >> return "") mformat <- lastMay <$> many (indented $ formatdirectivep sym) let comm = Commodity{csymbol=sym, cformat=mformat} modify' (\j -> j{jcommodities=M.insert sym comm $ jcommodities j}) where indented = (lift (some spacenonewline) >>) -- | Parse a format (sub)directive, throwing a parse error if its -- symbol does not match the one given. formatdirectivep :: Monad m => CommoditySymbol -> ErroringJournalParser m AmountStyle formatdirectivep expectedsym = do string "format" lift (some spacenonewline) pos <- getPosition Amount{acommodity,astyle} <- amountp _ <- followingcommentp <|> (lift eolof >> return "") if acommodity==expectedsym then return astyle else parserErrorAt pos $ printf "commodity directive symbol \"%s\" and format directive symbol \"%s\" should be the same" expectedsym acommodity applyaccountdirectivep :: JournalStateParser m () applyaccountdirectivep = do string "apply" >> lift (some spacenonewline) >> string "account" lift (some spacenonewline) parent <- lift accountnamep newline pushParentAccount parent endapplyaccountdirectivep :: JournalStateParser m () endapplyaccountdirectivep = do string "end" >> lift (some spacenonewline) >> string "apply" >> lift (some spacenonewline) >> string "account" popParentAccount aliasdirectivep :: JournalStateParser m () aliasdirectivep = do string "alias" lift (some spacenonewline) alias <- lift accountaliasp addAccountAlias alias accountaliasp :: TextParser m AccountAlias accountaliasp = regexaliasp <|> basicaliasp basicaliasp :: TextParser m AccountAlias basicaliasp = do -- pdbg 0 "basicaliasp" old <- rstrip <$> (some $ noneOf ("=" :: [Char])) char '=' many spacenonewline new <- rstrip <$> anyChar `manyTill` eolof -- don't require a final newline, good for cli options return $ BasicAlias (T.pack old) (T.pack new) regexaliasp :: TextParser m AccountAlias regexaliasp = do -- pdbg 0 "regexaliasp" char '/' re <- some $ noneOf ("/\n\r" :: [Char]) -- paranoid: don't try to read past line end char '/' many spacenonewline char '=' many spacenonewline repl <- rstrip <$> anyChar `manyTill` eolof return $ RegexAlias re repl endaliasesdirectivep :: JournalStateParser m () endaliasesdirectivep = do string "end aliases" clearAccountAliases tagdirectivep :: JournalStateParser m () tagdirectivep = do string "tag" "tag directive" lift (some spacenonewline) _ <- lift $ some nonspace lift restofline return () endtagdirectivep :: JournalStateParser m () endtagdirectivep = do (string "end tag" <|> string "pop") "end tag or pop directive" lift restofline return () defaultyeardirectivep :: JournalStateParser m () 
defaultyeardirectivep = do char 'Y' "default year" lift (many spacenonewline) y <- some digitChar let y' = read y failIfInvalidYear y setYear y' defaultcommoditydirectivep :: Monad m => JournalStateParser m () defaultcommoditydirectivep = do char 'D' "default commodity" lift (some spacenonewline) Amount{..} <- amountp lift restofline setDefaultCommodityAndStyle (acommodity, astyle) marketpricedirectivep :: Monad m => JournalStateParser m MarketPrice marketpricedirectivep = do char 'P' "market price" lift (many spacenonewline) date <- try (do {LocalTime d _ <- datetimep; return d}) <|> datep -- a time is ignored lift (some spacenonewline) symbol <- lift commoditysymbolp lift (many spacenonewline) price <- amountp lift restofline return $ MarketPrice date symbol price ignoredpricecommoditydirectivep :: JournalStateParser m () ignoredpricecommoditydirectivep = do char 'N' "ignored-price commodity" lift (some spacenonewline) lift commoditysymbolp lift restofline return () commodityconversiondirectivep :: Monad m => JournalStateParser m () commodityconversiondirectivep = do char 'C' "commodity conversion" lift (some spacenonewline) amountp lift (many spacenonewline) char '=' lift (many spacenonewline) amountp lift restofline return () --- ** transactions modifiertransactionp :: MonadIO m => ErroringJournalParser m ModifierTransaction modifiertransactionp = do char '=' "modifier transaction" lift (many spacenonewline) valueexpr <- T.pack <$> lift restofline postings <- postingsp Nothing return $ ModifierTransaction valueexpr postings periodictransactionp :: MonadIO m => ErroringJournalParser m PeriodicTransaction periodictransactionp = do char '~' "periodic transaction" lift (many spacenonewline) periodexpr <- T.pack <$> lift restofline postings <- postingsp Nothing return $ PeriodicTransaction periodexpr postings -- | Parse a (possibly unbalanced) transaction. transactionp :: MonadIO m => ErroringJournalParser m Transaction transactionp = do -- ptrace "transactionp" pos <- getPosition date <- datep "transaction" edate <- optional (secondarydatep date) "secondary date" lookAhead (lift spacenonewline <|> newline) "whitespace or newline" status <- lift statusp "cleared status" code <- T.pack <$> lift codep "transaction code" description <- T.pack . 
strip <$> descriptionp comment <- try followingcommentp <|> (newline >> return "") let tags = commentTags comment postings <- postingsp (Just date) pos' <- getPosition let sourcepos = journalSourcePos pos pos' return $ txnTieKnot $ Transaction 0 sourcepos date edate status code description comment tags postings "" #ifdef TESTS test_transactionp = do let s `gives` t = do let p = parseWithState mempty transactionp s assertBool $ isRight p let Right t2 = p -- same f = assertEqual (f t) (f t2) assertEqual (tdate t) (tdate t2) assertEqual (tdate2 t) (tdate2 t2) assertEqual (tstatus t) (tstatus t2) assertEqual (tcode t) (tcode t2) assertEqual (tdescription t) (tdescription t2) assertEqual (tcomment t) (tcomment t2) assertEqual (ttags t) (ttags t2) assertEqual (tpreceding_comment_lines t) (tpreceding_comment_lines t2) assertEqual (show $ tpostings t) (show $ tpostings t2) -- "0000/01/01\n\n" `gives` nulltransaction unlines [ "2012/05/14=2012/05/15 (code) desc ; tcomment1", " ; tcomment2", " ; ttag1: val1", " * a $1.00 ; pcomment1", " ; pcomment2", " ; ptag1: val1", " ; ptag2: val2" ] `gives` nulltransaction{ tdate=parsedate "2012/05/14", tdate2=Just $ parsedate "2012/05/15", tstatus=Uncleared, tcode="code", tdescription="desc", tcomment=" tcomment1\n tcomment2\n ttag1: val1\n", ttags=[("ttag1","val1")], tpostings=[ nullposting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1], pcomment=" pcomment1\n pcomment2\n ptag1: val1\n ptag2: val2\n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")], ptransaction=Nothing } ], tpreceding_comment_lines="" } unlines [ "2015/1/1", ] `gives` nulltransaction{ tdate=parsedate "2015/01/01", } assertRight $ parseWithState mempty transactionp $ unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.18" ,"" ] -- transactionp should not parse just a date assertLeft $ parseWithState mempty transactionp "2009/1/1\n" -- transactionp should not parse just a date and description assertLeft $ parseWithState mempty transactionp "2009/1/1 a\n" -- transactionp should not parse a following comment as part of the description let p = parseWithState mempty transactionp "2009/1/1 a ;comment\n b 1\n" assertRight p assertEqual "a" (let Right p' = p in tdescription p') -- parse transaction with following whitespace line assertRight $ parseWithState mempty transactionp $ unlines ["2012/1/1" ," a 1" ," b" ," " ] let p = parseWithState mempty transactionp $ unlines ["2009/1/1 x ; transaction comment" ," a 1 ; posting 1 comment" ," ; posting 1 comment 2" ," b" ," ; posting 2 comment" ] assertRight p assertEqual 2 (let Right t = p in length $ tpostings t) #endif --- ** postings -- Parse the following whitespace-beginning lines as postings, posting -- tags, and/or comments (inferring year, if needed, from the given date). 
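-- A rough GHCi sketch of exercising this parser (illustrative only, not a
-- doctest; the postings' exact Show output is not pinned down here, and the
-- input lines must begin with whitespace as described above):
--
-- > rejp (postingsp Nothing) "  expenses:food  $10\n  assets:cash\n"
-- > -- expected shape: Right [posting for expenses:food, posting for assets:cash]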
postingsp :: MonadIO m => Maybe Day -> ErroringJournalParser m [Posting] postingsp mdate = many (try $ postingp mdate) "postings" -- linebeginningwithspaces :: Monad m => JournalParser m String -- linebeginningwithspaces = do -- sp <- lift (some spacenonewline) -- c <- nonspace -- cs <- lift restofline -- return $ sp ++ (c:cs) ++ "\n" postingp :: MonadIO m => Maybe Day -> ErroringJournalParser m Posting postingp mtdate = do -- pdbg 0 "postingp" lift (some spacenonewline) status <- lift statusp lift (many spacenonewline) account <- modifiedaccountnamep let (ptype, account') = (accountNamePostingType account, textUnbracket account) amount <- spaceandamountormissingp massertion <- partialbalanceassertionp _ <- fixedlotpricep lift (many spacenonewline) (comment,tags,mdate,mdate2) <- try (followingcommentandtagsp mtdate) <|> (newline >> return ("",[],Nothing,Nothing)) return posting { pdate=mdate , pdate2=mdate2 , pstatus=status , paccount=account' , pamount=amount , pcomment=comment , ptype=ptype , ptags=tags , pbalanceassertion=massertion } #ifdef TESTS test_postingp = do let s `gives` ep = do let parse = parseWithState mempty (postingp Nothing) s assertBool -- "postingp parser" $ isRight parse let Right ap = parse same f = assertEqual (f ep) (f ap) same pdate same pstatus same paccount same pamount same pcomment same ptype same ptags same ptransaction " expenses:food:dining $10.00 ; a: a a \n ; b: b b \n" `gives` posting{paccount="expenses:food:dining", pamount=Mixed [usd 10], pcomment=" a: a a \n b: b b \n", ptags=[("a","a a"), ("b","b b")]} " a 1 ; [2012/11/28]\n" `gives` ("a" `post` num 1){pcomment=" [2012/11/28]\n" ,ptags=[("date","2012/11/28")] ,pdate=parsedateM "2012/11/28"} " a 1 ; a:a, [=2012/11/28]\n" `gives` ("a" `post` num 1){pcomment=" a:a, [=2012/11/28]\n" ,ptags=[("a","a"), ("date2","2012/11/28")] ,pdate=Nothing} " a 1 ; a:a\n ; [2012/11/28=2012/11/29],b:b\n" `gives` ("a" `post` num 1){pcomment=" a:a\n [2012/11/28=2012/11/29],b:b\n" ,ptags=[("a","a"), ("date","2012/11/28"), ("date2","2012/11/29"), ("b","b")] ,pdate=parsedateM "2012/11/28"} assertBool -- "postingp parses a quoted commodity with numbers" (isRight $ parseWithState mempty (postingp Nothing) " a 1 \"DE123\"\n") -- ,"postingp parses balance assertions and fixed lot prices" ~: do assertBool (isRight $ parseWithState mempty (postingp Nothing) " a 1 \"DE123\" =$1 { =2.2 EUR} \n") -- let parse = parseWithState mempty postingp " a\n ;next-line comment\n" -- assertRight parse -- let Right p = parse -- assertEqual "next-line comment\n" (pcomment p) -- assertEqual (Just nullmixedamt) (pbalanceassertion p) #endif --- * more tests tests_Hledger_Read_JournalReader = TestList $ concat [ -- test_numberp [ "showParsedMarketPrice" ~: do let mp = parseWithState mempty marketpricedirectivep "P 2017/01/30 BTC $922.83\n" mpString = (fmap . 
fmap) showMarketPrice mp mpString `is` (Just (Right "P 2017/01/30 BTC $922.83")) ] ] {- old hunit tests tests_Hledger_Read_JournalReader = TestList $ concat [ test_numberp, test_amountp, test_spaceandamountormissingp, test_tagcomment, test_inlinecomment, test_comments, test_ledgerDateSyntaxToTags, test_postingp, test_transactionp, [ "modifiertransactionp" ~: do assertParse (parseWithState mempty modifiertransactionp "= (some value expr)\n some:postings 1\n") ,"periodictransactionp" ~: do assertParse (parseWithState mempty periodictransactionp "~ (some period expr)\n some:postings 1\n") ,"directivep" ~: do assertParse (parseWithState mempty directivep "!include /some/file.x\n") assertParse (parseWithState mempty directivep "account some:account\n") assertParse (parseWithState mempty (directivep >> directivep) "!account a\nend\n") ,"comment" ~: do assertParse (parseWithState mempty comment "; some comment \n") assertParse (parseWithState mempty comment " \t; x\n") assertParse (parseWithState mempty comment "#x") ,"datep" ~: do assertParse (parseWithState mempty datep "2011/1/1") assertParseFailure (parseWithState mempty datep "1/1") assertParse (parseWithState mempty{jpsYear=Just 2011} datep "1/1") ,"datetimep" ~: do let p = do {t <- datetimep; eof; return t} bad = assertParseFailure . parseWithState mempty p good = assertParse . parseWithState mempty p bad "2011/1/1" bad "2011/1/1 24:00:00" bad "2011/1/1 00:60:00" bad "2011/1/1 00:00:60" good "2011/1/1 00:00" good "2011/1/1 23:59:59" good "2011/1/1 3:5:7" -- timezone is parsed but ignored let startofday = LocalTime (fromGregorian 2011 1 1) (TimeOfDay 0 0 (fromIntegral 0)) assertParseEqual (parseWithState mempty p "2011/1/1 00:00-0800") startofday assertParseEqual (parseWithState mempty p "2011/1/1 00:00+1234") startofday ,"defaultyeardirectivep" ~: do assertParse (parseWithState mempty defaultyeardirectivep "Y 2010\n") assertParse (parseWithState mempty defaultyeardirectivep "Y 10001\n") ,"marketpricedirectivep" ~: assertParseEqual (parseWithState mempty marketpricedirectivep "P 2004/05/01 XYZ $55.00\n") (MarketPrice (parsedate "2004/05/01") "XYZ" $ usd 55) ,"ignoredpricecommoditydirectivep" ~: do assertParse (parseWithState mempty ignoredpricecommoditydirectivep "N $\n") ,"defaultcommoditydirectivep" ~: do assertParse (parseWithState mempty defaultcommoditydirectivep "D $1,000.0\n") ,"commodityconversiondirectivep" ~: do assertParse (parseWithState mempty commodityconversiondirectivep "C 1h = $50.00\n") ,"tagdirectivep" ~: do assertParse (parseWithState mempty tagdirectivep "tag foo \n") ,"endtagdirectivep" ~: do assertParse (parseWithState mempty endtagdirectivep "end tag \n") assertParse (parseWithState mempty endtagdirectivep "pop \n") ,"accountnamep" ~: do assertBool "accountnamep parses a normal account name" (isRight $ parsewith accountnamep "a:b:c") assertBool "accountnamep rejects an empty inner component" (isLeft $ parsewith accountnamep "a::c") assertBool "accountnamep rejects an empty leading component" (isLeft $ parsewith accountnamep ":b:c") assertBool "accountnamep rejects an empty trailing component" (isLeft $ parsewith accountnamep "a:b:") ,"leftsymbolamountp" ~: do assertParseEqual (parseWithState mempty leftsymbolamountp "$1") (usd 1 `withPrecision` 0) assertParseEqual (parseWithState mempty leftsymbolamountp "$-1") (usd (-1) `withPrecision` 0) assertParseEqual (parseWithState mempty leftsymbolamountp "-$1") (usd (-1) `withPrecision` 0) ,"amount" ~: do let -- | compare a parse result with an expected amount, showing the 
debug representation for clarity assertAmountParse parseresult amount = (either (const "parse error") showAmountDebug parseresult) ~?= (showAmountDebug amount) assertAmountParse (parseWithState mempty amountp "1 @ $2") (num 1 `withPrecision` 0 `at` (usd 2 `withPrecision` 0)) ]] -} hledger-lib-1.2/Hledger/Read/TimeclockReader.hs0000644000000000000000000001040113035510426017531 0ustar0000000000000000{-| A reader for the timeclock file format generated by timeclock.el (). Example: @ i 2007\/03\/10 12:26:00 hledger o 2007\/03\/10 17:26:02 @ From timeclock.el 2.6: @ A timeclock contains data in the form of a single entry per line. Each entry has the form: CODE YYYY/MM/DD HH:MM:SS [COMMENT] CODE is one of: b, h, i, o or O. COMMENT is optional when the code is i, o or O. The meanings of the codes are: b Set the current time balance, or \"time debt\". Useful when archiving old log data, when a debt must be carried forward. The COMMENT here is the number of seconds of debt. h Set the required working time for the given day. This must be the first entry for that day. The COMMENT in this case is the number of hours in this workday. Floating point amounts are allowed. i Clock in. The COMMENT in this case should be the name of the project worked on. o Clock out. COMMENT is unnecessary, but can be used to provide a description of how the period went, for example. O Final clock out. Whatever project was being worked on, it is now finished. Useful for creating summary reports. @ -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Read.TimeclockReader ( -- * Reader reader, -- * Misc other exports timeclockfilep, -- * Tests tests_Hledger_Read_TimeclockReader ) where import Prelude () import Prelude.Compat import Control.Monad import Control.Monad.Except (ExceptT) import Control.Monad.State.Strict import Data.Maybe (fromMaybe) import Data.Text (Text) import qualified Data.Text as T import Test.HUnit import Text.Megaparsec hiding (parse) import Hledger.Data -- XXX too much reuse ? import Hledger.Read.Common import Hledger.Utils reader :: Reader reader = Reader {rFormat = "timeclock" ,rExtensions = ["timeclock"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from timeclock.el's timeclock -- format, saving the provided file path and the current time, or give an -- error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal timeclockfilep timeclockfilep :: ErroringJournalParser IO ParsedJournal timeclockfilep = do many timeclockitemp eof j@Journal{jparsetimeclockentries=es} <- get -- Convert timeclock entries in this journal to transactions, closing any unfinished sessions. -- Doing this here rather than in journalFinalise means timeclock sessions can't span file boundaries, -- but it simplifies code above. now <- liftIO getCurrentLocalTime -- entries have been parsed in reverse order. 
timeclockEntriesToTransactions -- expects them to be in normal order, then we must reverse again since -- journalFinalise expects them in reverse order let j' = j{jtxns = reverse $ timeclockEntriesToTransactions now $ reverse es, jparsetimeclockentries = []} return j' where -- As all ledger line types can be distinguished by the first -- character, excepting transactions versus empty (blank or -- comment-only) lines, can use choice w/o try timeclockitemp = choice [ void emptyorcommentlinep , timeclockentryp >>= \e -> modify' (\j -> j{jparsetimeclockentries = e : jparsetimeclockentries j}) ] "timeclock entry, or default year or historical price directive" -- | Parse a timeclock entry. timeclockentryp :: JournalStateParser m TimeclockEntry timeclockentryp = do sourcepos <- genericSourcePos <$> lift getPosition code <- oneOf ("bhioO" :: [Char]) lift (some spacenonewline) datetime <- datetimep account <- fromMaybe "" <$> optional (lift (some spacenonewline) >> modifiedaccountnamep) description <- T.pack . fromMaybe "" <$> lift (optional (some spacenonewline >> restofline)) return $ TimeclockEntry sourcepos (read [code]) datetime account description tests_Hledger_Read_TimeclockReader = TestList [ ] hledger-lib-1.2/Hledger/Read/TimedotReader.hs0000644000000000000000000001001313035510426017223 0ustar0000000000000000{-| A reader for the "timedot" file format. Example: @ #DATE #ACCT DOTS # Each dot represents 15m, spaces are ignored # on 2/1, 1h was spent on FOSS haskell work, 0.25h on research, etc. 2/1 fos.haskell .... .. biz.research . inc.client1 .... .... .... .... .... .... 2/2 biz.research . inc.client1 .... .... .. @ -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Read.TimedotReader ( -- * Reader reader, -- * Misc other exports timedotfilep, -- * Tests tests_Hledger_Read_TimedotReader ) where import Prelude () import Prelude.Compat import Control.Monad import Control.Monad.Except (ExceptT) import Control.Monad.State.Strict import Data.Char (isSpace) import Data.List (foldl') import Data.Maybe import Data.Text (Text) import Test.HUnit import Text.Megaparsec hiding (parse) import Hledger.Data import Hledger.Read.Common import Hledger.Utils hiding (ptrace) -- easier to toggle this here sometimes -- import qualified Hledger.Utils (ptrace) -- ptrace = Hledger.Utils.ptrace ptrace :: Monad m => a -> m a ptrace = return reader :: Reader reader = Reader {rFormat = "timedot" ,rExtensions = ["timedot"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from the timedot format, or give an error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal timedotfilep timedotfilep :: JournalStateParser m ParsedJournal timedotfilep = do many timedotfileitemp eof get where timedotfileitemp :: JournalStateParser m () timedotfileitemp = do ptrace "timedotfileitemp" choice [ void emptyorcommentlinep ,timedotdayp >>= \ts -> modify' (addTransactions ts) ] "timedot day entry, or default year or comment line or blank line" addTransactions :: [Transaction] -> Journal -> Journal addTransactions ts j = foldl' (flip ($)) j (map addTransaction ts) -- | Parse timedot day entries to zero or more time transactions for that day. -- @ -- 2/1 -- fos.haskell .... .. -- biz.research . -- inc.client1 .... .... .... .... .... .... 
-- @ timedotdayp :: JournalStateParser m [Transaction] timedotdayp = do ptrace " timedotdayp" d <- datep <* lift eolof es <- catMaybes <$> many (const Nothing <$> try emptyorcommentlinep <|> Just <$> (notFollowedBy datep >> timedotentryp)) return $ map (\t -> t{tdate=d}) es -- <$> many timedotentryp -- | Parse a single timedot entry to one (dateless) transaction. -- @ -- fos.haskell .... .. -- @ timedotentryp :: JournalStateParser m Transaction timedotentryp = do ptrace " timedotentryp" pos <- genericSourcePos <$> getPosition lift (many spacenonewline) a <- modifiedaccountnamep lift (many spacenonewline) hours <- try (followingcommentp >> return 0) <|> (timedotdurationp <* (try followingcommentp <|> (newline >> return ""))) let t = nulltransaction{ tsourcepos = pos, tstatus = Cleared, tpostings = [ nullposting{paccount=a ,pamount=Mixed [setAmountPrecision 2 $ num hours] -- don't assume hours; do set precision to 2 ,ptype=VirtualPosting ,ptransaction=Just t } ] } return t timedotdurationp :: JournalStateParser m Quantity timedotdurationp = try timedotnumberp <|> timedotdotsp -- | Parse a duration written as a decimal number of hours (optionally followed by the letter h). -- @ -- 1.5h -- @ timedotnumberp :: JournalStateParser m Quantity timedotnumberp = do (q, _, _, _) <- lift numberp lift (many spacenonewline) optional $ char 'h' lift (many spacenonewline) return q -- | Parse a quantity written as a line of dots, each representing 0.25. -- @ -- .... .. -- @ timedotdotsp :: JournalStateParser m Quantity timedotdotsp = do dots <- filter (not.isSpace) <$> many (oneOf (". " :: [Char])) return $ (/4) $ fromIntegral $ length dots tests_Hledger_Read_TimedotReader = TestList [ ] hledger-lib-1.2/Hledger/Reports.hs0000644000000000000000000000234313035210046015260 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Generate several common kinds of report from a journal, as \"*Report\" - simple intermediate data structures intended to be easily rendered as text, html, json, csv etc. by hledger commands, hamlet templates, javascript, or whatever. -} module Hledger.Reports ( module Hledger.Reports.ReportOptions, module Hledger.Reports.EntriesReport, module Hledger.Reports.PostingsReport, module Hledger.Reports.TransactionsReports, module Hledger.Reports.BalanceReport, module Hledger.Reports.MultiBalanceReports, -- module Hledger.Reports.BalanceHistoryReport, -- * Tests tests_Hledger_Reports ) where import Test.HUnit import Hledger.Reports.ReportOptions import Hledger.Reports.EntriesReport import Hledger.Reports.PostingsReport import Hledger.Reports.TransactionsReports import Hledger.Reports.BalanceReport import Hledger.Reports.MultiBalanceReports -- import Hledger.Reports.BalanceHistoryReport tests_Hledger_Reports :: Test tests_Hledger_Reports = TestList $ -- ++ tests_isInterestingIndented [ tests_Hledger_Reports_ReportOptions, tests_Hledger_Reports_EntriesReport, tests_Hledger_Reports_PostingsReport, tests_Hledger_Reports_BalanceReport ] hledger-lib-1.2/Hledger/Reports/BalanceHistoryReport.hs0000644000000000000000000000173513035210046021367 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Account balance history report. 
-} -- XXX not used module Hledger.Reports.BalanceHistoryReport ( accountBalanceHistory -- -- * Tests -- tests_Hledger_Reports_BalanceReport ) where import Data.Time.Calendar -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions import Hledger.Reports.TransactionsReports -- | Get the historical running inclusive balance of a particular account, -- from earliest to latest posting date. accountBalanceHistory :: ReportOpts -> Journal -> Account -> [(Day, MixedAmount)] accountBalanceHistory ropts j a = [(getdate t, bal) | (t,_,_,_,_,bal) <- items] where (_,items) = journalTransactionsReport ropts j acctquery inclusivebal = True acctquery = Acct $ (if inclusivebal then accountNameToAccountRegex else accountNameToAccountOnlyRegex) $ aname a getdate = if date2_ ropts then transactionDate2 else tdate hledger-lib-1.2/Hledger/Reports/BalanceReport.hs0000644000000000000000000003664313067565120020026 0ustar0000000000000000{-| Balance report, used by the balance command. -} {-# LANGUAGE FlexibleInstances, ScopedTypeVariables, OverloadedStrings #-} module Hledger.Reports.BalanceReport ( BalanceReport, BalanceReportItem, balanceReport, balanceReportValue, mixedAmountValue, amountValue, flatShowsExclusiveBalance, -- * Tests tests_Hledger_Reports_BalanceReport ) where import Data.List import Data.Ord import Data.Maybe import Data.Time.Calendar import Test.HUnit import qualified Data.Text as T import Hledger.Data import Hledger.Read (mamountp') import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions -- | A simple single-column balance report. It has: -- -- 1. a list of items, one per account, each containing: -- -- * the full account name -- -- * the Ledger-style elided short account name -- (the leaf account name, prefixed by any boring parents immediately above); -- or with --flat, the full account name again -- -- * the number of indentation steps for rendering a Ledger-style account tree, -- taking into account elided boring parents, --no-elide and --flat -- -- * an amount -- -- 2. the total of all amounts -- type BalanceReport = ([BalanceReportItem], MixedAmount) type BalanceReportItem = (AccountName, AccountName, Int, MixedAmount) -- | When true (the default), this makes balance --flat reports and their implementation clearer. -- Single/multi-col balance reports currently aren't all correct if this is false. flatShowsExclusiveBalance = True -- | Enabling this makes balance --flat --empty also show parent accounts without postings, -- in addition to those with postings and a zero balance. Disabling it shows only the latter. -- No longer supported, but leave this here for a bit. -- flatShowsPostinglessAccounts = True -- | Generate a simple balance report, containing the matched accounts and -- their balances (change of balance) during the specified period. -- This is like PeriodChangeReport with a single column (but more mature, -- eg this can do hierarchical display). 
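--
-- A minimal usage sketch (illustrative; @j@ is assumed to be a 'Journal'
-- obtained elsewhere, eg via Hledger.Read):
--
-- > let q              = queryFromOpts nulldate defreportopts
-- >     (items, total) = balanceReport defreportopts q j
-- > -- items :: [BalanceReportItem], ie (full name, elided name, indent, amount)
-- > -- total :: MixedAmount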
balanceReport :: ReportOpts -> Query -> Journal -> BalanceReport balanceReport opts q j = (items, total) where -- dbg1 = const id -- exclude from debug output dbg1 s = let p = "balanceReport" in Hledger.Utils.dbg1 (p++" "++s) -- add prefix in debug output accts = ledgerRootAccount $ ledgerFromJournal q $ journalSelectingAmountFromOpts opts j accts' :: [Account] | queryDepth q == 0 = dbg1 "accts" $ take 1 $ clipAccountsAndAggregate (queryDepth q) $ flattenAccounts accts | flat_ opts = dbg1 "accts" $ filterzeros $ filterempty $ drop 1 $ clipAccountsAndAggregate (queryDepth q) $ flattenAccounts accts | otherwise = dbg1 "accts" $ filter (not.aboring) $ drop 1 $ flattenAccounts $ markboring $ prunezeros $ clipAccounts (queryDepth q) accts where balance = if flat_ opts then aebalance else aibalance filterzeros = if empty_ opts then id else filter (not . isZeroMixedAmount . balance) filterempty = filter (\a -> anumpostings a > 0 || not (isZeroMixedAmount (balance a))) prunezeros = if empty_ opts then id else fromMaybe nullacct . pruneAccounts (isZeroMixedAmount . balance) markboring = if no_elide_ opts then id else markBoringParentAccounts items = dbg1 "items" $ map (balanceReportItem opts q) accts' total | not (flat_ opts) = dbg1 "total" $ sum [amt | (_,_,indent,amt) <- items, indent == 0] | otherwise = dbg1 "total" $ if flatShowsExclusiveBalance then sum $ map fourth4 items else sum $ map aebalance $ clipAccountsAndAggregate 1 accts' -- | In an account tree with zero-balance leaves removed, mark the -- elidable parent accounts (those with one subaccount and no balance -- of their own). markBoringParentAccounts :: Account -> Account markBoringParentAccounts = tieAccountParents . mapAccounts mark where mark a | length (asubs a) == 1 && isZeroMixedAmount (aebalance a) = a{aboring=True} | otherwise = a balanceReportItem :: ReportOpts -> Query -> Account -> BalanceReportItem balanceReportItem opts q a | flat_ opts = (name, name, 0, (if flatShowsExclusiveBalance then aebalance else aibalance) a) | otherwise = (name, elidedname, indent, aibalance a) where name | queryDepth q > 0 = aname a | otherwise = "..." elidedname = accountNameFromComponents (adjacentboringparentnames ++ [accountLeafName name]) adjacentboringparentnames = reverse $ map (accountLeafName.aname) $ takeWhile aboring parents indent = length $ filter (not.aboring) parents -- parents exclude the tree's root node parents = case parentAccounts a of [] -> [] as -> init as -- -- the above using the newer multi balance report code: -- balanceReport' opts q j = (items, total) -- where -- MultiBalanceReport (_,mbrrows,mbrtotals) = PeriodChangeReport opts q j -- items = [(a,a',n, headDef 0 bs) | ((a,a',n), bs) <- mbrrows] -- total = headDef 0 mbrtotals -- | Convert all the amounts in a single-column balance report to -- their value on the given date in their default valuation -- commodities. balanceReportValue :: Journal -> Day -> BalanceReport -> BalanceReport balanceReportValue j d r = r' where (items,total) = r r' = dbg8 "known market prices" (jmarketprices j) `seq` dbg8 "report end date" d `seq` dbg8 "balanceReportValue" ([(n, n', i, mixedAmountValue j d a) |(n,n',i,a) <- items], mixedAmountValue j d total) mixedAmountValue :: Journal -> Day -> MixedAmount -> MixedAmount mixedAmountValue j d (Mixed as) = Mixed $ map (amountValue j d) as -- | Find the market value of this amount on the given date, in it's -- default valuation commodity, based on recorded market prices. 
-- If no default valuation commodity can be found, the amount is left -- unchanged. amountValue :: Journal -> Day -> Amount -> Amount amountValue j d a = case commodityValue j d (acommodity a) of Just v -> v{aquantity=aquantity v * aquantity a ,aprice=aprice a } Nothing -> a -- | Find the market value, if known, of one unit of this commodity (A) on -- the given valuation date, in the commodity (B) mentioned in the latest -- applicable market price. The latest applicable market price is the market -- price directive for commodity A with the latest date that is on or before -- the valuation date; or if there are multiple such prices with the same date, -- the last parsed. commodityValue :: Journal -> Day -> CommoditySymbol -> Maybe Amount commodityValue j valuationdate c | null applicableprices = dbg Nothing | otherwise = dbg $ Just $ mpamount $ last applicableprices where dbg = dbg8 ("using market price for "++T.unpack c) applicableprices = [p | p <- sortBy (comparing mpdate) $ jmarketprices j , mpcommodity p == c , mpdate p <= valuationdate ] tests_balanceReport = let (opts,journal) `gives` r = do let (eitems, etotal) = r (aitems, atotal) = balanceReport opts (queryFromOpts nulldate opts) journal showw (acct,acct',indent,amt) = (acct, acct', indent, showMixedAmountDebug amt) assertEqual "items" (map showw eitems) (map showw aitems) assertEqual "total" (showMixedAmountDebug etotal) (showMixedAmountDebug atotal) usd0 = usd 0 in [ "balanceReport with no args on null journal" ~: do (defreportopts, nulljournal) `gives` ([], Mixed [nullamt]) ,"balanceReport with no args on sample journal" ~: do (defreportopts, samplejournal) `gives` ([ ("assets","assets",0, mamountp' "$-1.00") ,("assets:bank:saving","bank:saving",1, mamountp' "$1.00") ,("assets:cash","cash",1, mamountp' "$-2.00") ,("expenses","expenses",0, mamountp' "$2.00") ,("expenses:food","food",1, mamountp' "$1.00") ,("expenses:supplies","supplies",1, mamountp' "$1.00") ,("income","income",0, mamountp' "$-2.00") ,("income:gifts","gifts",1, mamountp' "$-1.00") ,("income:salary","salary",1, mamountp' "$-1.00") ,("liabilities:debts","liabilities:debts",0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with --depth=N" ~: do (defreportopts{depth_=Just 1}, samplejournal) `gives` ([ ("assets", "assets", 0, mamountp' "$-1.00") ,("expenses", "expenses", 0, mamountp' "$2.00") ,("income", "income", 0, mamountp' "$-2.00") ,("liabilities", "liabilities", 0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with depth:N" ~: do (defreportopts{query_="depth:1"}, samplejournal) `gives` ([ ("assets", "assets", 0, mamountp' "$-1.00") ,("expenses", "expenses", 0, mamountp' "$2.00") ,("income", "income", 0, mamountp' "$-2.00") ,("liabilities", "liabilities", 0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with a date or secondary date span" ~: do (defreportopts{query_="date:'in 2009'"}, samplejournal2) `gives` ([], Mixed [nullamt]) (defreportopts{query_="date2:'in 2009'"}, samplejournal2) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mamountp' "$1.00") ,("income:salary","income:salary",0,mamountp' "$-1.00") ], Mixed [usd0]) ,"balanceReport with desc:" ~: do (defreportopts{query_="desc:income"}, samplejournal) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mamountp' "$1.00") ,("income:salary","income:salary",0, mamountp' "$-1.00") ], Mixed [usd0]) ,"balanceReport with not:desc:" ~: do (defreportopts{query_="not:desc:income"}, samplejournal) `gives` ([ ("assets","assets",0, mamountp' "$-2.00") ,("assets:bank","bank",1, 
Mixed [usd0]) ,("assets:bank:checking","checking",2,mamountp' "$-1.00") ,("assets:bank:saving","saving",2, mamountp' "$1.00") ,("assets:cash","cash",1, mamountp' "$-2.00") ,("expenses","expenses",0, mamountp' "$2.00") ,("expenses:food","food",1, mamountp' "$1.00") ,("expenses:supplies","supplies",1, mamountp' "$1.00") ,("income:gifts","income:gifts",0, mamountp' "$-1.00") ,("liabilities:debts","liabilities:debts",0, mamountp' "$1.00") ], Mixed [usd0]) {- ,"accounts report with account pattern o" ~: defreportopts{patterns_=["o"]} `gives` [" $1 expenses:food" ," $-2 income" ," $-1 gifts" ," $-1 salary" ,"--------------------" ," $-1" ] ,"accounts report with account pattern o and --depth 1" ~: defreportopts{patterns_=["o"],depth_=Just 1} `gives` [" $1 expenses" ," $-2 income" ,"--------------------" ," $-1" ] ,"accounts report with account pattern a" ~: defreportopts{patterns_=["a"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $-1 income:salary" ," $1 liabilities:debts" ,"--------------------" ," $-1" ] ,"accounts report with account pattern e" ~: defreportopts{patterns_=["e"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," 0" ] ,"accounts report with unmatched parent of two matched subaccounts" ~: defreportopts{patterns_=["cash","saving"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,"accounts report with multi-part account name" ~: defreportopts{patterns_=["expenses:food"]} `gives` [" $1 expenses:food" ,"--------------------" ," $1" ] ,"accounts report with negative account pattern" ~: defreportopts{patterns_=["not:assets"]} `gives` [" $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," $1" ] ,"accounts report negative account pattern always matches full name" ~: defreportopts{patterns_=["not:e"]} `gives` ["--------------------" ," 0" ] ,"accounts report negative patterns affect totals" ~: defreportopts{patterns_=["expenses","not:food"]} `gives` [" $1 expenses:supplies" ,"--------------------" ," $1" ] ,"accounts report with -E shows zero-balance accounts" ~: defreportopts{patterns_=["assets"],empty_=True} `gives` [" $-1 assets" ," $1 bank" ," 0 checking" ," $1 saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,"accounts report with cost basis" ~: do j <- (readJournal Nothing Nothing Nothing $ unlines ["" ,"2008/1/1 test " ," a:b 10h @ $50" ," c:d " ]) >>= either error' return let j' = journalCanonicaliseAmounts $ journalConvertAmountsToCost j -- enable cost basis adjustment balanceReportAsText defreportopts (balanceReport defreportopts Any j') `is` [" $500 a:b" ," $-500 c:d" ,"--------------------" ," 0" ] -} ] Right samplejournal2 = journalBalanceTransactions False nulljournal{ jtxns = [ txnTieKnot Transaction{ tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/01/01", tdate2=Just $ parsedate "2009/01/01", tstatus=Uncleared, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= [posting {paccount="assets:bank:checking", pamount=Mixed [usd 1]} ,posting {paccount="income:salary", pamount=missingmixedamt} ], tpreceding_comment_lines="" } ] } -- tests_isInterestingIndented = [ -- "isInterestingIndented" ~: do -- let (opts, journal, acctname) `gives` r = isInterestingIndented opts l acctname `is` r -- where l = ledgerFromJournal (queryFromOpts nulldate opts) journal -- 
(defreportopts, samplejournal, "expenses") `gives` True -- ] tests_Hledger_Reports_BalanceReport :: Test tests_Hledger_Reports_BalanceReport = TestList tests_balanceReport hledger-lib-1.2/Hledger/Reports/EntriesReport.hs0000644000000000000000000000263613035210046020072 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Journal entries report, used by the print command. -} module Hledger.Reports.EntriesReport ( EntriesReport, EntriesReportItem, entriesReport, -- * Tests tests_Hledger_Reports_EntriesReport ) where import Data.List import Data.Ord import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions -- | A journal entries report is a list of whole transactions as -- originally entered in the journal (mostly). This is used by eg -- hledger's print command and hledger-web's journal entries view. type EntriesReport = [EntriesReportItem] type EntriesReportItem = Transaction -- | Select transactions for an entries report. entriesReport :: ReportOpts -> Query -> Journal -> EntriesReport entriesReport opts q j = sortBy (comparing date) $ filter (q `matchesTransaction`) ts where date = transactionDateFn opts ts = jtxns $ journalSelectingAmountFromOpts opts j tests_entriesReport :: [Test] tests_entriesReport = [ "entriesReport" ~: do assertEqual "not acct" 1 (length $ entriesReport defreportopts (Not $ Acct "bank") samplejournal) let sp = mkdatespan "2008/06/01" "2008/07/01" assertEqual "date" 3 (length $ entriesReport defreportopts (Date sp) samplejournal) ] tests_Hledger_Reports_EntriesReport :: Test tests_Hledger_Reports_EntriesReport = TestList $ tests_entriesReport hledger-lib-1.2/Hledger/Reports/MultiBalanceReports.hs0000644000000000000000000002371013067565677021234 0ustar0000000000000000{-# LANGUAGE FlexibleInstances, ScopedTypeVariables #-} {-| Multi-column balance reports, used by the balance command. -} module Hledger.Reports.MultiBalanceReports ( MultiBalanceReport(..), MultiBalanceReportRow, multiBalanceReport, multiBalanceReportValue, singleBalanceReport -- -- * Tests -- tests_Hledger_Reports_MultiBalanceReport ) where import Data.List import Data.Maybe import Data.Ord import Data.Time.Calendar import Safe -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions import Hledger.Reports.BalanceReport -- | A multi balance report is a balance report with one or more columns. It has: -- -- 1. a list of each column's period (date span) -- -- 2. a list of row items, each containing: -- -- * the full account name -- -- * the leaf account name -- -- * the account's depth -- -- * the amounts to show in each column -- -- * the total of the row's amounts -- -- * the average of the row's amounts -- -- 3. the column totals and the overall total and average -- -- The meaning of the amounts depends on the type of multi balance -- report, of which there are three: periodic, cumulative and historical -- (see 'BalanceType' and "Hledger.Cli.Balance"). 
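--
-- A rough deconstruction sketch (illustrative; @j@ is assumed to be a
-- 'Journal' obtained elsewhere):
--
-- > let MultiBalanceReport (colspans, rows, (coltotals, grandtotal, grandavg)) =
-- >       multiBalanceReport defreportopts (queryFromOpts nulldate defreportopts) j
-- > -- each row: (full name, leaf name, depth, amount per column, row total, row average)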
newtype MultiBalanceReport = MultiBalanceReport ([DateSpan] ,[MultiBalanceReportRow] ,MultiBalanceReportTotals ) type MultiBalanceReportRow = (AccountName, AccountName, Int, [MixedAmount], MixedAmount, MixedAmount) type MultiBalanceReportTotals = ([MixedAmount], MixedAmount, MixedAmount) instance Show MultiBalanceReport where -- use ppShow to break long lists onto multiple lines -- we add some bogus extra shows here to help ppShow parse the output -- and wrap tuples and lists properly show (MultiBalanceReport (spans, items, totals)) = "MultiBalanceReport (ignore extra quotes):\n" ++ ppShow (show spans, map show items, totals) -- type alias just to remind us which AccountNames might be depth-clipped, below. type ClippedAccountName = AccountName -- | Generates a single column BalanceReport like balanceReport, but uses -- multiBalanceReport, so supports --historical. -- TODO Does not support boring parent eliding or --flat yet. singleBalanceReport :: ReportOpts -> Query -> Journal -> BalanceReport singleBalanceReport opts q j = (rows', total) where MultiBalanceReport (_, rows, (totals, _, _)) = multiBalanceReport opts q j rows' = [(a ,if flat_ opts then a else a' -- BalanceReport expects full account name here with --flat ,if tree_ opts then d-1 else 0 -- BalanceReport uses 0-based account depths , headDef nullmixedamt amts -- 0 columns is illegal, should not happen, return zeroes if it does ) | (a,a',d, amts, _, _) <- rows] total = headDef nullmixedamt totals -- | Generate a multicolumn balance report for the matched accounts, -- showing the change of balance, accumulated balance, or historical balance -- in each of the specified periods. multiBalanceReport :: ReportOpts -> Query -> Journal -> MultiBalanceReport multiBalanceReport opts q j = MultiBalanceReport (displayspans, items, totalsrow) where symq = dbg1 "symq" $ filterQuery queryIsSym $ dbg1 "requested q" q depthq = dbg1 "depthq" $ filterQuery queryIsDepth q depth = queryDepth depthq depthless = dbg1 "depthless" . filterQuery (not . queryIsDepth) datelessq = dbg1 "datelessq" $ filterQuery (not . 
queryIsDateOrDate2) q dateqcons = if date2_ opts then Date2 else Date precedingq = dbg1 "precedingq" $ And [datelessq, dateqcons $ DateSpan Nothing (spanStart reportspan)] requestedspan = dbg1 "requestedspan" $ queryDateSpan (date2_ opts) q -- span specified by -b/-e/-p options and query args requestedspan' = dbg1 "requestedspan'" $ requestedspan `spanDefaultsFrom` journalDateSpan (date2_ opts) j -- if open-ended, close it using the journal's end dates intervalspans = dbg1 "intervalspans" $ splitSpan (interval_ opts) requestedspan' -- interval spans enclosing it reportspan = dbg1 "reportspan" $ DateSpan (maybe Nothing spanStart $ headMay intervalspans) -- the requested span enlarged to a whole number of intervals (maybe Nothing spanEnd $ lastMay intervalspans) newdatesq = dbg1 "newdateq" $ dateqcons reportspan reportq = dbg1 "reportq" $ depthless $ And [datelessq, newdatesq] -- user's query enlarged to whole intervals and with no depth limit ps :: [Posting] = dbg1 "ps" $ journalPostings $ filterJournalAmounts symq $ -- remove amount parts excluded by cur: filterJournalPostings reportq $ -- remove postings not matched by (adjusted) query journalSelectingAmountFromOpts opts j displayspans = dbg1 "displayspans" $ splitSpan (interval_ opts) displayspan where displayspan | empty_ opts = dbg1 "displayspan (-E)" reportspan -- all the requested intervals | otherwise = dbg1 "displayspan" $ requestedspan `spanIntersect` matchedspan -- exclude leading/trailing empty intervals matchedspan = dbg1 "matchedspan" $ postingsDateSpan' (whichDateFromOpts opts) ps psPerSpan :: [[Posting]] = dbg1 "psPerSpan" [filter (isPostingInDateSpan' (whichDateFromOpts opts) s) ps | s <- displayspans] postedAcctBalChangesPerSpan :: [[(ClippedAccountName, MixedAmount)]] = dbg1 "postedAcctBalChangesPerSpan" $ map postingAcctBals psPerSpan where postingAcctBals :: [Posting] -> [(ClippedAccountName, MixedAmount)] postingAcctBals ps = [(aname a, (if tree_ opts then aibalance else aebalance) a) | a <- as] where as = depthLimit $ (if tree_ opts then id else filter ((>0).anumpostings)) $ drop 1 $ accountsFromPostings ps depthLimit | tree_ opts = filter ((depthq `matchesAccount`).aname) -- exclude deeper balances | otherwise = clipAccountsAndAggregate depth -- aggregate deeper balances at the depth limit postedAccts :: [AccountName] = dbg1 "postedAccts" $ sort $ accountNamesFromPostings ps -- starting balances and accounts from transactions before the report start date startacctbals = dbg1 "startacctbals" $ map (\(a,_,_,b) -> (a,b)) startbalanceitems where (startbalanceitems,_) = dbg1 "starting balance report" $ balanceReport opts' precedingq j where opts' | tree_ opts = opts{no_elide_=True} | otherwise = opts{accountlistmode_=ALFlat} startingBalanceFor a = fromMaybe nullmixedamt $ lookup a startacctbals startAccts = dbg1 "startAccts" $ map fst startacctbals displayedAccts :: [ClippedAccountName] = dbg1 "displayedAccts" $ (if tree_ opts then expandAccountNames else id) $ nub $ map (clipOrEllipsifyAccountName depth) $ if empty_ opts then nub $ sort $ startAccts ++ postedAccts else postedAccts acctBalChangesPerSpan :: [[(ClippedAccountName, MixedAmount)]] = dbg1 "acctBalChangesPerSpan" [sortBy (comparing fst) $ unionBy (\(a,_) (a',_) -> a == a') postedacctbals zeroes | postedacctbals <- postedAcctBalChangesPerSpan] where zeroes = [(a, nullmixedamt) | a <- displayedAccts] acctBalChanges :: [(ClippedAccountName, [MixedAmount])] = dbg1 "acctBalChanges" [(a, map snd abs) | abs@((a,_):_) <- transpose acctBalChangesPerSpan] -- never null, 
or used when null... items :: [MultiBalanceReportRow] = dbg1 "items" [(a, accountLeafName a, accountNameLevel a, displayedBals, rowtot, rowavg) | (a,changes) <- acctBalChanges , let displayedBals = case balancetype_ opts of HistoricalBalance -> drop 1 $ scanl (+) (startingBalanceFor a) changes CumulativeChange -> drop 1 $ scanl (+) nullmixedamt changes _ -> changes , let rowtot = sum displayedBals , let rowavg = averageMixedAmounts displayedBals , empty_ opts || depth == 0 || any (not . isZeroMixedAmount) displayedBals ] totals :: [MixedAmount] = -- dbg1 "totals" $ map sum balsbycol where balsbycol = transpose [bs | (a,_,_,bs,_,_) <- items, not (tree_ opts) || a `elem` highestlevelaccts] highestlevelaccts = dbg1 "highestlevelaccts" [a | a <- displayedAccts, not $ any (`elem` displayedAccts) $ init $ expandAccountName a] totalsrow :: MultiBalanceReportTotals = dbg1 "totalsrow" (totals, sum totals, averageMixedAmounts totals) dbg1 s = let p = "multiBalanceReport" in Hledger.Utils.dbg1 (p++" "++s) -- add prefix in this function's debug output -- dbg1 = const id -- exclude this function from debug output -- | Convert all the amounts in a multi-column balance report to their -- value on the given date in their default valuation commodities -- (which are determined as of that date, not the report interval dates). multiBalanceReportValue :: Journal -> Day -> MultiBalanceReport -> MultiBalanceReport multiBalanceReportValue j d r = r' where MultiBalanceReport (spans, rows, (coltotals, rowtotaltotal, rowavgtotal)) = r r' = MultiBalanceReport (spans, [(acct, acct', depth, map convert rowamts, convert rowtotal, convert rowavg) | (acct, acct', depth, rowamts, rowtotal, rowavg) <- rows], (map convert coltotals, convert rowtotaltotal, convert rowavgtotal)) convert = mixedAmountValue j d hledger-lib-1.2/Hledger/Reports/PostingsReport.hs0000644000000000000000000005641613035210046020274 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances, TupleSections, OverloadedStrings #-} {-| Postings report, used by the register command. -} module Hledger.Reports.PostingsReport ( PostingsReport, PostingsReportItem, postingsReport, mkpostingsReportItem, -- * Tests tests_Hledger_Reports_PostingsReport ) where import Data.List import Data.Maybe import Data.Ord (comparing) -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe (headMay, lastMay) import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions -- | A postings report is a list of postings with a running total, a label -- for the total field, and a little extra transaction info to help with rendering. -- This is used eg for the register command. type PostingsReport = (String -- label for the running balance column XXX remove ,[PostingsReportItem] -- line items, one per posting ) type PostingsReportItem = (Maybe Day -- The posting date, if this is the first posting in a -- transaction or if it's different from the previous -- posting's date. Or if this a summary posting, the -- report interval's start date if this is the first -- summary posting in the interval. ,Maybe Day -- If this is a summary posting, the report interval's -- end date if this is the first summary posting in -- the interval. ,Maybe String -- The posting's transaction's description, if this is the first posting in the transaction. ,Posting -- The posting, possibly with the account name depth-clipped. 
,MixedAmount -- The running total after this posting, or with --average, -- the running average posting amount. With --historical, -- postings before the report start date are included in -- the running total/average. ) -- | Select postings from the journal and add running balance and other -- information to make a postings report. Used by eg hledger's register command. postingsReport :: ReportOpts -> Query -> Journal -> PostingsReport postingsReport opts q j = (totallabel, items) where reportspan = adjustReportDates opts q j whichdate = whichDateFromOpts opts depth = queryDepth q -- postings to be included in the report, and similarly-matched postings before the report start date (precedingps, reportps) = matchedPostingsBeforeAndDuring opts q j reportspan -- postings or pseudo postings to be displayed displayps | interval == NoInterval = map (,Nothing) reportps | otherwise = summarisePostingsByInterval interval whichdate depth showempty reportspan reportps where interval = interval_ opts -- XXX showempty = empty_ opts || average_ opts -- posting report items ready for display items = dbg1 "postingsReport items" $ postingsReportItems displayps (nullposting,Nothing) whichdate depth startbal runningcalc startnum where historical = balancetype_ opts == HistoricalBalance precedingsum = sumPostings precedingps precedingavg | null precedingps = 0 | otherwise = precedingsum `divideMixedAmount` (fromIntegral $ length precedingps) startbal | average_ opts = if historical then precedingavg else 0 | otherwise = if historical then precedingsum else 0 startnum = if historical then length precedingps + 1 else 1 runningcalc | average_ opts = \i avg amt -> avg + (amt - avg) `divideMixedAmount` (fromIntegral i) -- running average | otherwise = \_ bal amt -> bal + amt -- running total totallabel = "Total" -- | Adjust report start/end dates to more useful ones based on -- journal data and report intervals. Ie: -- 1. If the start date is unspecified, use the earliest date in the journal (if any) -- 2. If the end date is unspecified, use the latest date in the journal (if any) -- 3. If a report interval is specified, enlarge the dates to enclose whole intervals adjustReportDates :: ReportOpts -> Query -> Journal -> DateSpan adjustReportDates opts q j = reportspan where -- see also multiBalanceReport requestedspan = dbg1 "requestedspan" $ queryDateSpan' q -- span specified by -b/-e/-p options and query args journalspan = dbg1 "journalspan" $ dates `spanUnion` date2s -- earliest and latest dates (or date2s) in the journal where dates = journalDateSpan False j date2s = journalDateSpan True j requestedspanclosed = dbg1 "requestedspanclosed" $ requestedspan `spanDefaultsFrom` journalspan -- if open-ended, close it using the journal's dates (if any) intervalspans = dbg1 "intervalspans" $ splitSpan (interval_ opts) requestedspanclosed -- get the whole intervals enclosing that mreportstart = dbg1 "reportstart" $ maybe Nothing spanStart $ headMay intervalspans -- start of the first interval, or open ended mreportend = dbg1 "reportend" $ maybe Nothing spanEnd $ lastMay intervalspans -- end of the last interval, or open ended reportspan = dbg1 "reportspan" $ DateSpan mreportstart mreportend -- the requested span enlarged to whole intervals if possible -- | Find postings matching a given query, within a given date span, -- and also any similarly-matched postings before that date span. -- Date restrictions and depth restrictions in the query are ignored. -- A helper for the postings report. 
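-- For example (an illustrative sketch, not a real test case): with a report
-- span of 2008/06/01..2008/07/01 and matched postings dated 2008/01/01,
-- 2008/06/02 and 2008/06/03, this returns roughly
--   ([posting dated 2008/01/01], [postings dated 2008/06/02 and 2008/06/03])
-- so callers can sum the first list into a starting balance (for -H) and
-- render the second as report items.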
matchedPostingsBeforeAndDuring :: ReportOpts -> Query -> Journal -> DateSpan -> ([Posting],[Posting]) matchedPostingsBeforeAndDuring opts q j (DateSpan mstart mend) = dbg1 "beforeps, duringps" $ span (beforestartq `matchesPosting`) beforeandduringps where beforestartq = dbg1 "beforestartq" $ dateqtype $ DateSpan Nothing mstart beforeandduringps = dbg1 "ps4" $ sortBy (comparing sortdate) $ -- sort postings by date or date2 dbg1 "ps3" $ map (filterPostingAmount symq) $ -- remove amount parts which the query's cur: terms would exclude dbg1 "ps2" $ (if related_ opts then concatMap relatedPostings else id) $ -- with -r, replace each with its sibling postings dbg1 "ps1" $ filter (beforeandduringq `matchesPosting`) $ -- filter postings by the query, with no start date or depth limit journalPostings $ journalSelectingAmountFromOpts opts j where beforeandduringq = dbg1 "beforeandduringq" $ And [depthless $ dateless q, beforeendq] where depthless = filterQuery (not . queryIsDepth) dateless = filterQuery (not . queryIsDateOrDate2) beforeendq = dateqtype $ DateSpan Nothing mend sortdate = if date2_ opts then postingDate2 else postingDate symq = dbg1 "symq" $ filterQuery queryIsSym q dateqtype | queryIsDate2 dateq || (queryIsDate dateq && date2_ opts) = Date2 | otherwise = Date where dateq = dbg1 "dateq" $ filterQuery queryIsDateOrDate2 $ dbg1 "q" q -- XXX confused by multiple date:/date2: ? -- | Generate postings report line items from a list of postings or (with -- non-Nothing dates attached) summary postings. postingsReportItems :: [(Posting,Maybe Day)] -> (Posting,Maybe Day) -> WhichDate -> Int -> MixedAmount -> (Int -> MixedAmount -> MixedAmount -> MixedAmount) -> Int -> [PostingsReportItem] postingsReportItems [] _ _ _ _ _ _ = [] postingsReportItems ((p,menddate):ps) (pprev,menddateprev) wd d b runningcalcfn itemnum = i:(postingsReportItems ps (p,menddate) wd d b' runningcalcfn (itemnum+1)) where i = mkpostingsReportItem showdate showdesc wd menddate p' b' (showdate, showdesc) | isJust menddate = (menddate /= menddateprev, False) | otherwise = (isfirstintxn || isdifferentdate, isfirstintxn) isfirstintxn = ptransaction p /= ptransaction pprev isdifferentdate = case wd of PrimaryDate -> postingDate p /= postingDate pprev SecondaryDate -> postingDate2 p /= postingDate2 pprev p' = p{paccount= clipOrEllipsifyAccountName d $ paccount p} b' = runningcalcfn itemnum b (pamount p) -- | Generate one postings report line item, containing the posting, -- the current running balance, and optionally the posting date and/or -- the transaction description. mkpostingsReportItem :: Bool -> Bool -> WhichDate -> Maybe Day -> Posting -> MixedAmount -> PostingsReportItem mkpostingsReportItem showdate showdesc wd menddate p b = (if showdate then Just date else Nothing ,menddate ,if showdesc then Just desc else Nothing ,p ,b ) where date = case wd of PrimaryDate -> postingDate p SecondaryDate -> postingDate2 p desc = T.unpack $ maybe "" tdescription $ ptransaction p -- | Convert a list of postings into summary postings, one per interval, -- aggregated to the specified depth if any. 
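-- A rough illustration (hypothetical postings): with a monthly interval and
-- depth 2, June postings to expenses:food:groceries and expenses:food:dining
-- are aggregated into one summary posting to expenses:food, dated at the
-- interval's start and paired with the interval's end date, roughly:
--   (nullposting{pdate=Just 2008/06/01, paccount="expenses:food", pamount=total}, Just 2008/07/01)
-- See summarisePostingsInDateSpan below for the details.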
summarisePostingsByInterval :: Interval -> WhichDate -> Int -> Bool -> DateSpan -> [Posting] -> [SummaryPosting] summarisePostingsByInterval interval wd depth showempty reportspan ps = concatMap summarisespan $ splitSpan interval reportspan where summarisespan s = summarisePostingsInDateSpan s wd depth showempty (postingsinspan s) postingsinspan s = filter (isPostingInDateSpan' wd s) ps tests_summarisePostingsByInterval = [ "summarisePostingsByInterval" ~: do summarisePostingsByInterval (Quarters 1) PrimaryDate 99999 False (DateSpan Nothing Nothing) [] ~?= [] ] -- | A summary posting summarises the activity in one account within a report -- interval. It is currently kludgily represented by a regular Posting with no -- description, the interval's start date stored as the posting date, and the -- interval's end date attached with a tuple. type SummaryPosting = (Posting, Maybe Day) -- | Given a date span (representing a report interval) and a list of -- postings within it, aggregate the postings into one summary posting per -- account. -- -- When a depth argument is present, postings to accounts of greater -- depth are also aggregated where possible. If the depth is 0, all -- postings in the span are aggregated into a single posting with -- account name "...". -- -- The showempty flag includes spans with no postings and also postings -- with 0 amount. -- summarisePostingsInDateSpan :: DateSpan -> WhichDate -> Int -> Bool -> [Posting] -> [SummaryPosting] summarisePostingsInDateSpan (DateSpan b e) wd depth showempty ps | null ps && (isNothing b || isNothing e) = [] | null ps && showempty = [(summaryp, Just e')] | otherwise = summarypes where postingdate = if wd == PrimaryDate then postingDate else postingDate2 b' = fromMaybe (maybe nulldate postingdate $ headMay ps) b e' = fromMaybe (maybe (addDays 1 nulldate) postingdate $ lastMay ps) e summaryp = nullposting{pdate=Just b'} clippedanames | depth > 0 = nub $ map (clipAccountName depth) anames | otherwise = ["..."] summaryps | depth > 0 = [summaryp{paccount=a,pamount=balance a} | a <- clippedanames] | otherwise = [summaryp{paccount="...",pamount=sum $ map pamount ps}] summarypes = map (, Just e') $ (if showempty then id else filter (not . isZeroMixedAmount . 
pamount)) summaryps anames = sort $ nub $ map paccount ps -- aggregate balances by account, like ledgerFromJournal, then do depth-clipping accts = accountsFromPostings ps balance a = maybe nullmixedamt bal $ lookupAccount a accts where bal = if isclipped a then aibalance else aebalance isclipped a = accountNameLevel a >= depth -- tests_summarisePostingsInDateSpan = [ -- "summarisePostingsInDateSpan" ~: do -- let gives (b,e,depth,showempty,ps) = -- (summarisePostingsInDateSpan (mkdatespan b e) depth showempty ps `is`) -- let ps = -- [ -- nullposting{lpdescription="desc",lpaccount="expenses:food:groceries",lpamount=Mixed [usd 1]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=Mixed [usd 2]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food", lpamount=Mixed [usd 4]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=Mixed [usd 8]} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,[]) `gives` -- [] -- ("2008/01/01","2009/01/01",0,9999,True,[]) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31"} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food", lpamount=Mixed [usd 4]} -- ,nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food:dining", lpamount=Mixed [usd 10]} -- ,nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food:groceries",lpamount=Mixed [usd 1]} -- ] -- ("2008/01/01","2009/01/01",0,2,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food",lpamount=Mixed [usd 15]} -- ] -- ("2008/01/01","2009/01/01",0,1,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses",lpamount=Mixed [usd 15]} -- ] -- ("2008/01/01","2009/01/01",0,0,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="",lpamount=Mixed [usd 15]} -- ] tests_postingsReport = [ "postingsReport" ~: do -- with the query specified explicitly let (query, journal) `gives` n = (length $ snd $ postingsReport defreportopts query journal) `is` n (Any, nulljournal) `gives` 0 (Any, samplejournal) `gives` 11 -- register --depth just clips account names (Depth 2, samplejournal) `gives` 11 (And [Depth 1, Status Cleared, Acct "expenses"], samplejournal) `gives` 2 (And [And [Depth 1, Status Cleared], Acct "expenses"], samplejournal) `gives` 2 -- with query and/or command-line options assertEqual "" 11 (length $ snd $ postingsReport defreportopts Any samplejournal) assertEqual "" 9 (length $ snd $ postingsReport defreportopts{interval_=Months 1} Any samplejournal) assertEqual "" 19 (length $ snd $ postingsReport defreportopts{interval_=Months 1, empty_=True} Any samplejournal) assertEqual "" 4 (length $ snd $ postingsReport defreportopts (Acct "assets:bank:checking") samplejournal) -- (defreportopts, And [Acct "a a", Acct "'b"], samplejournal2) `gives` 0 -- [(Just (parsedate "2008-01-01","income"),assets:bank:checking $1,$1) -- ,(Nothing,income:salary $-1,0) -- ,(Just (2008-06-01,"gift"),assets:bank:checking $1,$1) -- ,(Nothing,income:gifts $-1,0) -- ,(Just (2008-06-02,"save"),assets:bank:saving $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) -- ,(Just (2008-06-03,"eat & shop"),expenses:food $1,$1) -- ,(Nothing,expenses:supplies $1,$2) -- ,(Nothing,assets:cash $-2,0) -- 
,(Just (2008-12-31,"pay off"),liabilities:debts $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) -- ] {- let opts = defreportopts (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with cleared option" ~: do let opts = defreportopts{cleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with uncleared option" ~: do let opts = defreportopts{uncleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report sorts by date" ~: do j <- readJournal' $ unlines ["2008/02/02 a" ," b 1" ," c" ,"" ,"2008/01/01 d" ," e 1" ," f" ] let opts = defreportopts registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/02/02"] ,"postings report with account pattern" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cash"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with account pattern, case insensitive" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cAsH"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with display expression" ~: do j <- samplejournal let gives displayexpr = (registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is`) where opts = defreportopts{display_=Just displayexpr} "d<[2008/6/2]" `gives` ["2008/01/01","2008/06/01"] "d<=[2008/6/2]" `gives` ["2008/01/01","2008/06/01","2008/06/02"] "d=[2008/6/2]" `gives` ["2008/06/02"] "d>=[2008/6/2]" `gives` ["2008/06/02","2008/06/03","2008/12/31"] "d>[2008/6/2]" `gives` ["2008/06/03","2008/12/31"] ,"postings report with period expression" ~: do j <- samplejournal let periodexpr `gives` dates = do j' <- samplejournal registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j') `is` dates where opts = defreportopts{period_=maybePeriod date1 periodexpr} "" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2008" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2007" `gives` [] "june" `gives` ["2008/06/01","2008/06/02","2008/06/03"] "monthly" `gives` ["2008/01/01","2008/06/01","2008/12/01"] "quarterly" `gives` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=maybePeriod date1 "yearly"} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines 
["2008/01/01 - 2008/12/31 assets:bank:saving $1 $1" ," assets:cash $-2 $-1" ," expenses:food $1 0" ," expenses:supplies $1 $1" ," income:gifts $-1 0" ," income:salary $-1 $-1" ," liabilities:debts $1 0" ] let opts = defreportopts{period_=maybePeriod date1 "quarterly"} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=maybePeriod date1 "quarterly",empty_=True} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/07/01","2008/10/01"] ] , "postings report with depth arg" ~: do j <- samplejournal let opts = defreportopts{depth_=Just 2} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank $1 $1" ," assets:bank $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank $-1 0" ] -} ] tests_Hledger_Reports_PostingsReport :: Test tests_Hledger_Reports_PostingsReport = TestList $ tests_summarisePostingsByInterval ++ tests_postingsReport hledger-lib-1.2/Hledger/Reports/ReportOptions.hs0000644000000000000000000003550513066774455020144 0ustar0000000000000000{-# LANGUAGE CPP, RecordWildCards, DeriveDataTypeable #-} {-| Options common to most hledger reports. -} module Hledger.Reports.ReportOptions ( ReportOpts(..), BalanceType(..), AccountListMode(..), FormatStr, defreportopts, rawOptsToReportOpts, checkReportOpts, flat_, tree_, whichDateFromOpts, journalSelectingAmountFromOpts, queryFromOpts, queryFromOptsOnly, queryOptsFromOpts, transactionDateFn, postingDateFn, reportStartDate, reportEndDate, reportStartEndDates, tests_Hledger_Reports_ReportOptions ) where import Data.Data (Data) #if !MIN_VERSION_base(4,8,0) import Data.Functor.Compat ((<$>)) #endif import Data.Maybe import qualified Data.Text as T import Data.Typeable (Typeable) import Data.Time.Calendar import Data.Default import Safe import Test.HUnit import Text.Megaparsec.Error import Hledger.Data import Hledger.Query import Hledger.Utils type FormatStr = String -- | Which "balance" is being shown in a balance report. data BalanceType = PeriodChange -- ^ The change of balance in each period. | CumulativeChange -- ^ The accumulated change across multiple periods. | HistoricalBalance -- ^ The historical ending balance, including the effect of -- all postings before the report period. Unless altered by, -- a query, this is what you would see on a bank statement. deriving (Eq,Show,Data,Typeable) instance Default BalanceType where def = PeriodChange -- | Should accounts be displayed: in the command's default style, hierarchically, or as a flat list ? data AccountListMode = ALDefault | ALTree | ALFlat deriving (Eq, Show, Data, Typeable) instance Default AccountListMode where def = ALDefault -- | Standard options for customising report filtering and output, -- corresponding to hledger's command-line options and query language -- arguments. Used in hledger-lib and above. 
data ReportOpts = ReportOpts { period_ :: Period ,interval_ :: Interval ,clearedstatus_ :: Maybe ClearedStatus ,cost_ :: Bool ,depth_ :: Maybe Int ,display_ :: Maybe DisplayExp ,date2_ :: Bool ,empty_ :: Bool ,no_elide_ :: Bool ,real_ :: Bool ,format_ :: Maybe FormatStr ,query_ :: String -- all arguments, as a string -- register only ,average_ :: Bool ,related_ :: Bool -- balance only ,balancetype_ :: BalanceType ,accountlistmode_ :: AccountListMode ,drop_ :: Int ,row_total_ :: Bool ,no_total_ :: Bool ,value_ :: Bool ,pretty_tables_ :: Bool } deriving (Show, Data, Typeable) instance Default ReportOpts where def = defreportopts instance Default Bool where def = False defreportopts :: ReportOpts defreportopts = ReportOpts def def def def def def def def def def def def def def def def def def def def def rawOptsToReportOpts :: RawOpts -> IO ReportOpts rawOptsToReportOpts rawopts = checkReportOpts <$> do d <- getCurrentDay let rawopts' = checkRawOpts rawopts return defreportopts{ period_ = periodFromRawOpts d rawopts' ,interval_ = intervalFromRawOpts rawopts' ,clearedstatus_ = clearedStatusFromRawOpts rawopts' ,cost_ = boolopt "cost" rawopts' ,depth_ = maybeintopt "depth" rawopts' ,display_ = maybedisplayopt d rawopts' ,date2_ = boolopt "date2" rawopts' ,empty_ = boolopt "empty" rawopts' ,no_elide_ = boolopt "no-elide" rawopts' ,real_ = boolopt "real" rawopts' ,format_ = maybestringopt "format" rawopts' -- XXX move to CliOpts or move validation from Cli.CliOptions to here ,query_ = unwords $ listofstringopt "args" rawopts' -- doesn't handle an arg like "" right ,average_ = boolopt "average" rawopts' ,related_ = boolopt "related" rawopts' ,balancetype_ = balancetypeopt rawopts' ,accountlistmode_ = accountlistmodeopt rawopts' ,drop_ = intopt "drop" rawopts' ,row_total_ = boolopt "row-total" rawopts' ,no_total_ = boolopt "no-total" rawopts' ,value_ = boolopt "value" rawopts' ,pretty_tables_ = boolopt "pretty-tables" rawopts' } -- | Do extra validation of raw option values, raising an error if there's a problem. checkRawOpts :: RawOpts -> RawOpts checkRawOpts rawopts -- our standard behaviour is to accept conflicting options actually, -- using the last one - more forgiving for overriding command-line aliases -- | countopts ["change","cumulative","historical"] > 1 -- = usageError "please specify at most one of --change, --cumulative, --historical" -- | countopts ["flat","tree"] > 1 -- = usageError "please specify at most one of --flat, --tree" -- | countopts ["daily","weekly","monthly","quarterly","yearly"] > 1 -- = usageError "please specify at most one of --daily, " | otherwise = rawopts -- where -- countopts = length . filter (`boolopt` rawopts) -- | Do extra validation of report options, raising an error if there's a problem. 
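-- Eg (illustrative): checkReportOpts defreportopts{depth_=Just (-1)} calls
-- usageError, while a Nothing or non-negative depth returns the options
-- unchanged.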
checkReportOpts :: ReportOpts -> ReportOpts checkReportOpts ropts@ReportOpts{..} = either usageError (const ropts) $ do case depth_ of Just d | d < 0 -> Left "--depth should have a positive number" _ -> Right () accountlistmodeopt :: RawOpts -> AccountListMode accountlistmodeopt rawopts = case reverse $ filter (`elem` ["tree","flat"]) $ map fst rawopts of ("tree":_) -> ALTree ("flat":_) -> ALFlat _ -> ALDefault balancetypeopt :: RawOpts -> BalanceType balancetypeopt rawopts = case reverse $ filter (`elem` ["change","cumulative","historical"]) $ map fst rawopts of ("historical":_) -> HistoricalBalance ("cumulative":_) -> CumulativeChange _ -> PeriodChange -- Get the period specified by the intersection of -b/--begin, -e/--end and/or -- -p/--period options, using the given date to interpret relative date expressions. periodFromRawOpts :: Day -> RawOpts -> Period periodFromRawOpts d rawopts = case (mearliestb, mlateste) of (Nothing, Nothing) -> PeriodAll (Just b, Nothing) -> PeriodFrom b (Nothing, Just e) -> PeriodTo e (Just b, Just e) -> simplifyPeriod $ PeriodBetween b e where mearliestb = case beginDatesFromRawOpts d rawopts of [] -> Nothing bs -> Just $ minimum bs mlateste = case endDatesFromRawOpts d rawopts of [] -> Nothing es -> Just $ maximum es -- Get all begin dates specified by -b/--begin or -p/--period options, in order, -- using the given date to interpret relative date expressions. beginDatesFromRawOpts :: Day -> RawOpts -> [Day] beginDatesFromRawOpts d = catMaybes . map (begindatefromrawopt d) where begindatefromrawopt d (n,v) | n == "begin" = either (\e -> usageError $ "could not parse "++n++" date: "++parseErrorPretty e) Just $ fixSmartDateStrEither' d (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) id $ parsePeriodExpr d (stripquotes $ T.pack v) of (_, DateSpan (Just b) _) -> Just b _ -> Nothing | otherwise = Nothing -- Get all end dates specified by -e/--end or -p/--period options, in order, -- using the given date to interpret relative date expressions. endDatesFromRawOpts :: Day -> RawOpts -> [Day] endDatesFromRawOpts d = catMaybes . map (enddatefromrawopt d) where enddatefromrawopt d (n,v) | n == "end" = either (\e -> usageError $ "could not parse "++n++" date: "++parseErrorPretty e) Just $ fixSmartDateStrEither' d (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) id $ parsePeriodExpr d (stripquotes $ T.pack v) of (_, DateSpan _ (Just e)) -> Just e _ -> Nothing | otherwise = Nothing -- | Get the report interval, if any, specified by the last of -p/--period, -- -D/--daily, -W/--weekly, -M/--monthly etc. options. intervalFromRawOpts :: RawOpts -> Interval intervalFromRawOpts = lastDef NoInterval . catMaybes . map intervalfromrawopt where intervalfromrawopt (n,v) | n == "period" = either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) (Just . fst) $ parsePeriodExpr nulldate (stripquotes $ T.pack v) -- reference date does not affect the interval | n == "daily" = Just $ Days 1 | n == "weekly" = Just $ Weeks 1 | n == "monthly" = Just $ Months 1 | n == "quarterly" = Just $ Quarters 1 | n == "yearly" = Just $ Years 1 | otherwise = Nothing -- | Get the cleared status, if any, specified by the last of -C/--cleared, -- --pending, -U/--uncleared options. clearedStatusFromRawOpts :: RawOpts -> Maybe ClearedStatus clearedStatusFromRawOpts = lastMay . catMaybes . 
map clearedstatusfromrawopt where clearedstatusfromrawopt (n,_) | n == "cleared" = Just Cleared | n == "pending" = Just Pending | n == "uncleared" = Just Uncleared | otherwise = Nothing type DisplayExp = String maybedisplayopt :: Day -> RawOpts -> Maybe DisplayExp maybedisplayopt d rawopts = maybe Nothing (Just . regexReplaceBy "\\[.+?\\]" fixbracketeddatestr) $ maybestringopt "display" rawopts where fixbracketeddatestr "" = "" fixbracketeddatestr s = "[" ++ fixSmartDateStr d (T.pack $ init $ tail s) ++ "]" -- | Select the Transaction date accessor based on --date2. transactionDateFn :: ReportOpts -> (Transaction -> Day) transactionDateFn ReportOpts{..} = if date2_ then transactionDate2 else tdate -- | Select the Posting date accessor based on --date2. postingDateFn :: ReportOpts -> (Posting -> Day) postingDateFn ReportOpts{..} = if date2_ then postingDate2 else postingDate -- | Report which date we will report on based on --date2. whichDateFromOpts :: ReportOpts -> WhichDate whichDateFromOpts ReportOpts{..} = if date2_ then SecondaryDate else PrimaryDate -- | Legacy-compatible convenience aliases for accountlistmode_. tree_ :: ReportOpts -> Bool tree_ = (==ALTree) . accountlistmode_ flat_ :: ReportOpts -> Bool flat_ = (==ALFlat) . accountlistmode_ -- depthFromOpts :: ReportOpts -> Int -- depthFromOpts opts = min (fromMaybe 99999 $ depth_ opts) (queryDepth $ queryFromOpts nulldate opts) -- | Convert this journal's postings' amounts to the cost basis amounts if -- specified by options. journalSelectingAmountFromOpts :: ReportOpts -> Journal -> Journal journalSelectingAmountFromOpts opts | cost_ opts = journalConvertAmountsToCost | otherwise = id -- | Convert report options and arguments to a query. queryFromOpts :: Day -> ReportOpts -> Query queryFromOpts d ReportOpts{..} = simplifyQuery $ And $ [flagsq, argsq] where flagsq = And $ [(if date2_ then Date2 else Date) $ periodAsDateSpan period_] ++ (if real_ then [Real True] else []) ++ (if empty_ then [Empty True] else []) -- ? ++ (maybe [] ((:[]) . Status) clearedstatus_) ++ (maybe [] ((:[]) . Depth) depth_) argsq = fst $ parseQuery d (T.pack query_) -- | Convert report options to a query, ignoring any non-flag command line arguments. queryFromOptsOnly :: Day -> ReportOpts -> Query queryFromOptsOnly _d ReportOpts{..} = simplifyQuery flagsq where flagsq = And $ [(if date2_ then Date2 else Date) $ periodAsDateSpan period_] ++ (if real_ then [Real True] else []) ++ (if empty_ then [Empty True] else []) -- ? ++ (maybe [] ((:[]) . Status) clearedstatus_) ++ (maybe [] ((:[]) . Depth) depth_) tests_queryFromOpts :: [Test] tests_queryFromOpts = [ "queryFromOpts" ~: do assertEqual "" Any (queryFromOpts nulldate defreportopts) assertEqual "" (Acct "a") (queryFromOpts nulldate defreportopts{query_="a"}) assertEqual "" (Desc "a a") (queryFromOpts nulldate defreportopts{query_="desc:'a a'"}) assertEqual "" (Date $ mkdatespan "2012/01/01" "2013/01/01") (queryFromOpts nulldate defreportopts{period_=PeriodFrom (parsedate "2012/01/01") ,query_="date:'to 2013'" }) assertEqual "" (Date2 $ mkdatespan "2012/01/01" "2013/01/01") (queryFromOpts nulldate defreportopts{query_="date2:'in 2012'"}) assertEqual "" (Or [Acct "a a", Acct "'b"]) (queryFromOpts nulldate defreportopts{query_="'a a' 'b"}) ] -- | Convert report options and arguments to query options. 
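-- Note that currently only the query arguments, not the flags, can produce
-- query options. Eg (an illustrative sketch, cf the tests below):
--   queryOptsFromOpts nulldate defreportopts{query_="a"}        -- []
--   queryOptsFromOpts nulldate defreportopts{query_="inacct:a"} -- roughly [QueryOptInAcct "a"]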
queryOptsFromOpts :: Day -> ReportOpts -> [QueryOpt] queryOptsFromOpts d ReportOpts{..} = flagsqopts ++ argsqopts where flagsqopts = [] argsqopts = snd $ parseQuery d (T.pack query_) tests_queryOptsFromOpts :: [Test] tests_queryOptsFromOpts = [ "queryOptsFromOpts" ~: do assertEqual "" [] (queryOptsFromOpts nulldate defreportopts) assertEqual "" [] (queryOptsFromOpts nulldate defreportopts{query_="a"}) assertEqual "" [] (queryOptsFromOpts nulldate defreportopts{period_=PeriodFrom (parsedate "2012/01/01") ,query_="date:'to 2013'" }) ] -- | The effective report start date is the one specified by options or queries, -- otherwise the earliest transaction or posting date in the journal, -- otherwise (for an empty journal) nothing. -- Needs IO to parse smart dates in options/queries. reportStartDate :: Journal -> ReportOpts -> IO (Maybe Day) reportStartDate j ropts = (fst <$>) <$> reportStartEndDates j ropts -- | The effective report end date is the one specified by options or queries, -- otherwise the latest transaction or posting date in the journal, -- otherwise (for an empty journal) nothing. -- Needs IO to parse smart dates in options/queries. reportEndDate :: Journal -> ReportOpts -> IO (Maybe Day) reportEndDate j ropts = (snd <$>) <$> reportStartEndDates j ropts reportStartEndDates :: Journal -> ReportOpts -> IO (Maybe (Day,Day)) reportStartEndDates j ropts = do today <- getCurrentDay let q = queryFromOpts today ropts mrequestedstartdate = queryStartDate False q mrequestedenddate = queryEndDate False q return $ case journalDateSpan False j of -- don't bother with secondary dates DateSpan (Just journalstartdate) (Just journalenddate) -> Just (fromMaybe journalstartdate mrequestedstartdate, fromMaybe journalenddate mrequestedenddate) _ -> Nothing tests_Hledger_Reports_ReportOptions :: Test tests_Hledger_Reports_ReportOptions = TestList $ tests_queryFromOpts ++ tests_queryOptsFromOpts hledger-lib-1.2/Hledger/Reports/TransactionsReports.hs0000644000000000000000000003305413035210046021312 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Here are several variants of a transactions report. Transactions reports are like a postings report, but more transaction-oriented, and (in the account-centric variant) relative to a some base account. They are used by hledger-web. -} module Hledger.Reports.TransactionsReports ( TransactionsReport, TransactionsReportItem, AccountTransactionsReport, AccountTransactionsReportItem, triOrigTransaction, triDate, triAmount, triBalance, triCommodityAmount, triCommodityBalance, journalTransactionsReport, accountTransactionsReport, transactionsReportByCommodity, transactionRegisterDate -- -- * Tests -- tests_Hledger_Reports_TransactionsReports ) where import Data.List import Data.Ord -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions import Hledger.Utils.Debug -- | A transactions report includes a list of transactions -- (posting-filtered and unfiltered variants), a running balance, and some -- other information helpful for rendering a register view (a flag -- indicating multiple other accounts and a display string describing -- them) with or without a notion of current account(s). -- Two kinds of report use this data structure, see journalTransactionsReport -- and accountTransactionsReport below for detais. 
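-- Informally, each item pairs the original transaction with its
-- account-relative view plus some rendering hints, roughly:
--   (originaltxn, filteredtxn, issplit, otheracctsdescription, amount, runningtotal)
-- as spelled out field by field in the types below.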
type TransactionsReport = (String -- label for the balance column, eg "balance" or "total" ,[TransactionsReportItem] -- line items, one per transaction ) type TransactionsReportItem = (Transaction -- the original journal transaction, unmodified ,Transaction -- the transaction as seen from a particular account, with postings maybe filtered ,Bool -- is this a split, ie more than one other account posting ,String -- a display string describing the other account(s), if any ,MixedAmount -- the amount posted to the current account(s) by the filtered postings (or total amount posted) ,MixedAmount -- the running total of item amounts, starting from zero; -- or with --historical, the running total including items -- (matched by the report query) preceding the report period ) triOrigTransaction (torig,_,_,_,_,_) = torig triDate (_,tacct,_,_,_,_) = tdate tacct triAmount (_,_,_,_,a,_) = a triBalance (_,_,_,_,_,a) = a triCommodityAmount c = filterMixedAmountByCommodity c . triAmount triCommodityBalance c = filterMixedAmountByCommodity c . triBalance ------------------------------------------------------------------------------- -- | Select transactions from the whole journal. This is similar to a -- "postingsReport" except with transaction-based report items which -- are ordered most recent first. XXX Or an EntriesReport - use that instead ? -- This is used by hledger-web's journal view. journalTransactionsReport :: ReportOpts -> Journal -> Query -> TransactionsReport journalTransactionsReport opts j q = (totallabel, items) where -- XXX items' first element should be the full transaction with all postings items = reverse $ accountTransactionsReportItems q None nullmixedamt id ts ts = sortBy (comparing date) $ filter (q `matchesTransaction`) $ jtxns $ journalSelectingAmountFromOpts opts j date = transactionDateFn opts ------------------------------------------------------------------------------- -- | An account transactions report represents transactions affecting -- a particular account (or possibly several accounts, but we don't -- use that). It is used eg by hledger-ui's and hledger-web's account -- register view, where we want to show one row per transaction, in -- the context of the current account. Report items consist of: -- -- - the transaction, unmodified -- -- - the transaction as seen in the context of the current account and query, -- which means: -- -- - the transaction date is set to the "transaction context date", -- which can be different from the transaction's general date: -- if postings to the current account (and matched by the report query) -- have their own dates, it's the earliest of these dates. -- -- - the transaction's postings are filtered, excluding any which are not -- matched by the report query -- -- - a text description of the other account(s) posted to/from -- -- - a flag indicating whether there's more than one other account involved -- -- - the total increase/decrease to the current account -- -- - the report transactions' running total after this transaction; -- or if historical balance is requested (-H), the historical running total. -- The historical running total includes transactions from before the -- report start date if one is specified, filtered by the report query. -- The historical running total may or may not be the account's historical -- running balance, depending on the report query. -- -- Items are sorted by transaction register date (the earliest date the transaction -- posts to the current account), most recent first. 
-- Reporting intervals are currently ignored. -- type AccountTransactionsReport = (String -- label for the balance column, eg "balance" or "total" ,[AccountTransactionsReportItem] -- line items, one per transaction ) type AccountTransactionsReportItem = ( Transaction -- the transaction, unmodified ,Transaction -- the transaction, as seen from the current account ,Bool -- is this a split (more than one posting to other accounts) ? ,String -- a display string describing the other account(s), if any ,MixedAmount -- the amount posted to the current account(s) (or total amount posted) ,MixedAmount -- the register's running total or the current account(s)'s historical balance, after this transaction ) accountTransactionsReport :: ReportOpts -> Journal -> Query -> Query -> AccountTransactionsReport accountTransactionsReport opts j reportq thisacctq = (label, items) where -- a depth limit does not affect the account transactions report -- seems unnecessary for some reason XXX reportq' = -- filterQuery (not . queryIsDepth) reportq -- get all transactions, with amounts converted to cost basis if -B ts1 = jtxns $ journalSelectingAmountFromOpts opts j -- apply any cur:SYM filters in reportq' symq = filterQuery queryIsSym reportq' ts2 = (if queryIsNull symq then id else map (filterTransactionAmounts symq)) ts1 -- keep just the transactions affecting this account (via possibly realness or status-filtered postings) realq = filterQuery queryIsReal reportq' statusq = filterQuery queryIsStatus reportq' ts3 = filter (matchesTransaction thisacctq . filterTransactionPostings (And [realq, statusq])) ts2 -- sort by the transaction's register date, for accurate starting balance ts = sortBy (comparing (transactionRegisterDate reportq' thisacctq)) ts3 (startbal,label) | balancetype_ opts == HistoricalBalance = (sumPostings priorps, balancelabel) | otherwise = (nullmixedamt, totallabel) where priorps = dbg1 "priorps" $ filter (matchesPosting (dbg1 "priorq" $ And [thisacctq, tostartdateq, datelessreportq])) $ transactionsPostings ts tostartdateq = case mstartdate of Just _ -> Date (DateSpan Nothing mstartdate) Nothing -> None -- no start date specified, there are no prior postings mstartdate = queryStartDate (date2_ opts) reportq' datelessreportq = filterQuery (not . queryIsDateOrDate2) reportq' items = reverse $ -- see also registerChartHtml accountTransactionsReportItems reportq' thisacctq startbal negate ts totallabel = "Period Total" balancelabel = "Historical Total" -- | Generate transactions report items from a list of transactions, -- using the provided user-specified report query, a query specifying -- which account to use as the focus, a starting balance, a sign-setting -- function and a balance-summing function. Or with a None current account -- query, this can also be used for the journalTransactionsReport. 
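-- The two call sites above show the two modes, roughly:
--   accountTransactionsReportItems q None nullmixedamt id ts              -- journal-wide register, no current account
--   accountTransactionsReportItems reportq thisacctq startbal negate ts   -- register for a particular account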
accountTransactionsReportItems :: Query -> Query -> MixedAmount -> (MixedAmount -> MixedAmount) -> [Transaction] -> [TransactionsReportItem] accountTransactionsReportItems _ _ _ _ [] = [] accountTransactionsReportItems reportq thisacctq bal signfn (torig:ts) = case i of Just i' -> i':is Nothing -> is -- 201403: This is used for both accountTransactionsReport and journalTransactionsReport, which makes it a bit overcomplicated -- 201407: I've lost my grip on this, let's just hope for the best -- 201606: we now calculate change and balance from filtered postings, check this still works well for all callers XXX where tfiltered@Transaction{tpostings=reportps} = filterTransactionPostings reportq torig tacct = tfiltered{tdate=transactionRegisterDate reportq thisacctq tfiltered} (i,bal') = case reportps of [] -> (Nothing,bal) -- no matched postings in this transaction, skip it _ -> (Just (torig, tacct, numotheraccts > 1, otheracctstr, a, b), b) where (thisacctps, otheracctps) = partition (matchesPosting thisacctq) reportps numotheraccts = length $ nub $ map paccount otheracctps otheracctstr | thisacctq == None = summarisePostingAccounts reportps -- no current account ? summarise all matched postings | numotheraccts == 0 = summarisePostingAccounts thisacctps -- only postings to current account ? summarise those | otherwise = summarisePostingAccounts otheracctps -- summarise matched postings to other account(s) a = signfn $ negate $ sum $ map pamount thisacctps b = bal + a is = accountTransactionsReportItems reportq thisacctq bal' signfn ts -- | What is the transaction's date in the context of a particular account -- (specified with a query) and report query, as in an account register ? -- It's normally the transaction's general date, but if any posting(s) -- matched by the report query and affecting the matched account(s) have -- their own earlier dates, it's the earliest of these dates. -- Secondary transaction/posting dates are ignored. transactionRegisterDate :: Query -> Query -> Transaction -> Day transactionRegisterDate reportq thisacctq t | null thisacctps = tdate t | otherwise = minimum $ map postingDate thisacctps where reportps = tpostings $ filterTransactionPostings reportq t thisacctps = filter (matchesPosting thisacctq) reportps -- -- | Generate a short readable summary of some postings, like -- -- "from (negatives) to (positives)". -- summarisePostings :: [Posting] -> String -- summarisePostings ps = -- case (summarisePostingAccounts froms, summarisePostingAccounts tos) of -- ("",t) -> "to "++t -- (f,"") -> "from "++f -- (f,t) -> "from "++f++" to "++t -- where -- (froms,tos) = partition (fromMaybe False . isNegativeMixedAmount . pamount) ps -- | Generate a simplified summary of some postings' accounts. -- To reduce noise, if there are both real and virtual postings, show only the real ones. summarisePostingAccounts :: [Posting] -> String summarisePostingAccounts ps = (intercalate ", " . map (T.unpack . accountSummarisedName) . nub . map paccount) displayps -- XXX pack where realps = filter isReal ps displayps | null realps = ps | otherwise = realps ------------------------------------------------------------------------------- -- | Split a transactions report whose items may involve several commodities, -- into one or more single-commodity transactions reports. 
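-- Eg (an illustrative sketch): a report whose item amounts involve both "$"
-- and "€" is split into two single-commodity reports, keyed and sorted by
-- commodity symbol:
--   [("$", items restricted to $ amounts), ("€", items restricted to € amounts)]
-- with each report's running balances recomputed from its own filtered amounts.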
transactionsReportByCommodity :: TransactionsReport -> [(CommoditySymbol, TransactionsReport)] transactionsReportByCommodity tr = [(c, filterTransactionsReportByCommodity c tr) | c <- transactionsReportCommodities tr] where transactionsReportCommodities (_,items) = nub $ sort $ map acommodity $ concatMap (amounts . triAmount) items -- Remove transaction report items and item amount (and running -- balance amount) components that don't involve the specified -- commodity. Other item fields such as the transaction are left unchanged. filterTransactionsReportByCommodity :: CommoditySymbol -> TransactionsReport -> TransactionsReport filterTransactionsReportByCommodity c (label,items) = (label, fixTransactionsReportItemBalances $ concat [filterTransactionsReportItemByCommodity c i | i <- items]) where filterTransactionsReportItemByCommodity c (t,t2,s,o,a,bal) | c `elem` cs = [item'] | otherwise = [] where cs = map acommodity $ amounts a item' = (t,t2,s,o,a',bal) a' = filterMixedAmountByCommodity c a fixTransactionsReportItemBalances [] = [] fixTransactionsReportItemBalances [i] = [i] fixTransactionsReportItemBalances items = reverse $ i:(go startbal is) where i:is = reverse items startbal = filterMixedAmountByCommodity c $ triBalance i go _ [] = [] go bal ((t,t2,s,o,amt,_):is) = (t,t2,s,o,amt,bal'):go bal' is where bal' = bal + amt ------------------------------------------------------------------------------- hledger-lib-1.2/Hledger/Utils.hs0000644000000000000000000001515213066746043014743 0ustar0000000000000000{-| Standard imports and utilities which are useful everywhere, or needed low in the module hierarchy. This is the bottom of hledger's module graph. -} module Hledger.Utils (---- provide these frequently used modules - or not, for clearer api: -- module Control.Monad, -- module Data.List, -- module Data.Maybe, -- module Data.Time.Calendar, -- module Data.Time.Clock, -- module Data.Time.LocalTime, -- module Data.Tree, -- module Text.RegexPR, -- module Test.HUnit, -- module Text.Printf, ---- all of this one: module Hledger.Utils, module Hledger.Utils.Debug, module Hledger.Utils.Parse, module Hledger.Utils.Regex, module Hledger.Utils.String, module Hledger.Utils.Text, module Hledger.Utils.Test, module Hledger.Utils.Tree, -- Debug.Trace.trace, -- module Data.PPrint, -- module Hledger.Utils.UTF8IOCompat SystemString,fromSystemString,toSystemString,error',userError',usageError, -- the rest need to be done in each module I think ) where import Control.Monad (liftM) -- import Data.Char import Data.List -- import Data.Maybe -- import Data.PPrint import Data.Text (Text) import qualified Data.Text.IO as T import Data.Time.Clock import Data.Time.LocalTime -- import Data.Text (Text) -- import qualified Data.Text as T import System.Directory (getHomeDirectory) import System.FilePath((), isRelative) import System.IO -- import Text.Printf -- import qualified Data.Map as Map import Hledger.Utils.Debug import Hledger.Utils.Parse import Hledger.Utils.Regex import Hledger.Utils.String import Hledger.Utils.Text import Hledger.Utils.Test import Hledger.Utils.Tree -- import Prelude hiding (readFile,writeFile,appendFile,getContents,putStr,putStrLn) -- import Hledger.Utils.UTF8IOCompat (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import Hledger.Utils.UTF8IOCompat (SystemString,fromSystemString,toSystemString,error',userError',usageError) -- tuples first3 (x,_,_) = x second3 (_,x,_) = x third3 (_,_,x) = x first4 (x,_,_,_) = x second4 (_,x,_,_) = x third4 (_,_,x,_) = x fourth4 (_,_,_,x) = x first5 
(x,_,_,_,_) = x second5 (_,x,_,_,_) = x third5 (_,_,x,_,_) = x fourth5 (_,_,_,x,_) = x fifth5 (_,_,_,_,x) = x first6 (x,_,_,_,_,_) = x second6 (_,x,_,_,_,_) = x third6 (_,_,x,_,_,_) = x fourth6 (_,_,_,x,_,_) = x fifth6 (_,_,_,_,x,_) = x sixth6 (_,_,_,_,_,x) = x -- lists splitAtElement :: Eq a => a -> [a] -> [[a]] splitAtElement x l = case l of [] -> [] e:es | e==x -> split es es -> split es where split es = let (first,rest) = break (x==) es in first : splitAtElement x rest -- text -- time getCurrentLocalTime :: IO LocalTime getCurrentLocalTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToLocalTime tz t getCurrentZonedTime :: IO ZonedTime getCurrentZonedTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToZonedTime tz t -- misc isLeft :: Either a b -> Bool isLeft (Left _) = True isLeft _ = False isRight :: Either a b -> Bool isRight = not . isLeft -- | Apply a function the specified number of times. Possibly uses O(n) stack ? applyN :: Int -> (a -> a) -> a -> a applyN n f = (!! n) . iterate f -- from protolude, compare -- applyN :: Int -> (a -> a) -> a -> a -- applyN n f = X.foldr (.) identity (X.replicate n f) -- | Convert a possibly relative, possibly tilde-containing file path to an absolute one, -- given the current directory. ~username is not supported. Leave "-" unchanged. -- Can raise an error. expandPath :: FilePath -> FilePath -> IO FilePath -- general type sig for use in reader parsers expandPath _ "-" = return "-" expandPath curdir p = (if isRelative p then (curdir ) else id) `liftM` expandPath' p where expandPath' ('~':'/':p) = ( p) <$> getHomeDirectory expandPath' ('~':'\\':p) = ( p) <$> getHomeDirectory expandPath' ('~':_) = ioError $ userError "~USERNAME in paths is not supported" expandPath' p = return p firstJust ms = case dropWhile (==Nothing) ms of [] -> Nothing (md:_) -> md -- | Read a file in universal newline mode, handling any of the usual line ending conventions. readFile' :: FilePath -> IO Text readFile' name = do h <- openFile name ReadMode hSetNewlineMode h universalNewlineMode T.hGetContents h -- | Read a file in universal newline mode, handling any of the usual line ending conventions. readFileAnyLineEnding :: FilePath -> IO Text readFileAnyLineEnding path = do h <- openFile path ReadMode hSetNewlineMode h universalNewlineMode T.hGetContents h -- | Read the given file, or standard input if the path is "-", using -- universal newline mode. readFileOrStdinAnyLineEnding :: String -> IO Text readFileOrStdinAnyLineEnding f = do h <- fileHandle f hSetNewlineMode h universalNewlineMode T.hGetContents h where fileHandle "-" = return stdin fileHandle f = openFile f ReadMode -- | Total version of maximum, for integral types, giving 0 for an empty list. maximum' :: Integral a => [a] -> a maximum' [] = 0 maximum' xs = maximumStrict xs -- | Strict version of sum that doesn’t leak space {-# INLINABLE sumStrict #-} sumStrict :: Num a => [a] -> a sumStrict = foldl' (+) 0 -- | Strict version of maximum that doesn’t leak space {-# INLINABLE maximumStrict #-} maximumStrict :: Ord a => [a] -> a maximumStrict = foldl1' max -- | Strict version of minimum that doesn’t leak space {-# INLINABLE minimumStrict #-} minimumStrict :: Ord a => [a] -> a minimumStrict = foldl1' min -- | This is a version of sequence based on difference lists. It is -- slightly faster but we mostly use it because it uses the heap -- instead of the stack. 
This has the advantage that Neil Mitchell’s -- trick of limiting the stack size to discover space leaks doesn’t -- show this as a false positive. {-# INLINABLE sequence' #-} sequence' :: Monad f => [f a] -> f [a] sequence' ms = do h <- go id ms return (h []) where go h [] = return h go h (m:ms) = do x <- m go (h . (x :)) ms {-# INLINABLE mapM' #-} mapM' :: Monad f => (a -> f b) -> [a] -> f [b] mapM' f = sequence' . map f hledger-lib-1.2/Hledger/Utils/Debug.hs0000644000000000000000000002134613035510426015761 0ustar0000000000000000{-# LANGUAGE CPP, FlexibleContexts, TypeFamilies #-} -- | Debugging helpers -- more: -- http://hackage.haskell.org/packages/archive/TraceUtils/0.1.0.2/doc/html/Debug-TraceUtils.html -- http://hackage.haskell.org/packages/archive/trace-call/0.1/doc/html/Debug-TraceCall.html -- http://hackage.haskell.org/packages/archive/htrace/0.1/doc/html/Debug-HTrace.html -- http://hackage.haskell.org/packages/archive/traced/2009.7.20/doc/html/Debug-Traced.html module Hledger.Utils.Debug ( module Hledger.Utils.Debug ,module Debug.Trace #if __GLASGOW_HASKELL__ >= 704 ,ppShow #endif ) where import Control.Monad (when) import Control.Monad.IO.Class import Data.List hiding (uncons) import qualified Data.Text as T import Debug.Trace import Hledger.Utils.Parse import Safe (readDef) import System.Environment (getArgs) import System.Exit import System.IO.Unsafe (unsafePerformIO) import Text.Megaparsec import Text.Printf #if __GLASGOW_HASKELL__ >= 704 import Text.Show.Pretty (ppShow) #else -- the required pretty-show version requires GHC >= 7.4 ppShow :: Show a => a -> String ppShow = show #endif pprint :: Show a => a -> IO () pprint = putStrLn . ppShow -- | Trace (print to stderr) a showable value using a custom show function. traceWith :: (a -> String) -> a -> a traceWith f a = trace (f a) a -- | Parsec trace - show the current parsec position and next input, -- and the provided label if it's non-null. ptrace :: String -> TextParser m () ptrace msg = do pos <- getPosition next <- (T.take peeklength) `fmap` getInput let (l,c) = (sourceLine pos, sourceColumn pos) s = printf "at line %2d col %2d: %s" (unPos l) (unPos c) (show next) :: String s' = printf ("%-"++show (peeklength+30)++"s") s ++ " " ++ msg trace s' $ return () where peeklength = 30 -- | Global debug level, which controls the verbosity of debug output -- on the console. The default is 0 meaning no debug output. The -- @--debug@ command line flag sets it to 1, or @--debug=N@ sets it to -- a higher value (note: not @--debug N@ for some reason). This uses -- unsafePerformIO and can be accessed from anywhere and before normal -- command-line processing. When running with :main in GHCI, you must -- touch and reload this module to see the effect of a new --debug option. -- After command-line processing, it is also available as the @debug_@ -- field of 'Hledger.Cli.CliOptions.CliOpts'. -- {-# OPTIONS_GHC -fno-cse #-} -- {-# NOINLINE debugLevel #-} debugLevel :: Int debugLevel = case snd $ break (=="--debug") args of "--debug":[] -> 1 "--debug":n:_ -> readDef 1 n _ -> case take 1 $ filter ("--debug" `isPrefixOf`) args of ['-':'-':'d':'e':'b':'u':'g':'=':v] -> readDef 1 v _ -> 0 where args = unsafePerformIO getArgs -- | Convenience aliases for tracePrettyAt. -- Always pretty-print a message and the showable value to the console, then return it. -- ("dbg" without the 0 clashes with megaparsec 5.1). 
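-- Typical usage, as seen throughout hledger-lib: wrap a value in a pipeline
-- without changing it, eg
--   reportspan = dbg1 "reportspan" $ DateSpan mreportstart mreportend
-- which pretty-prints the label and value to stderr when --debug (or
-- --debug=N with N >= 1) is in effect, and otherwise just returns the value.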
dbg0 :: Show a => String -> a -> a dbg0 = tracePrettyAt 0 -- | Pretty-print a message and the showable value to the console when the debug level is >= 1, then return it. Uses unsafePerformIO. dbg1 :: Show a => String -> a -> a dbg1 = tracePrettyAt 1 dbg2 :: Show a => String -> a -> a dbg2 = tracePrettyAt 2 dbg3 :: Show a => String -> a -> a dbg3 = tracePrettyAt 3 dbg4 :: Show a => String -> a -> a dbg4 = tracePrettyAt 4 dbg5 :: Show a => String -> a -> a dbg5 = tracePrettyAt 5 dbg6 :: Show a => String -> a -> a dbg6 = tracePrettyAt 6 dbg7 :: Show a => String -> a -> a dbg7 = tracePrettyAt 7 dbg8 :: Show a => String -> a -> a dbg8 = tracePrettyAt 8 dbg9 :: Show a => String -> a -> a dbg9 = tracePrettyAt 9 -- | Convenience aliases for tracePrettyAtIO. -- Like dbg, but convenient to insert in an IO monad. -- XXX These have a bug; they should use traceIO, not trace, -- otherwise GHC can occasionally over-optimise -- (cf lpaste a few days ago where it killed/blocked a child thread). dbg0IO :: (MonadIO m, Show a) => String -> a -> m () dbg0IO = tracePrettyAtIO 0 dbg1IO :: (MonadIO m, Show a) => String -> a -> m () dbg1IO = tracePrettyAtIO 1 dbg2IO :: (MonadIO m, Show a) => String -> a -> m () dbg2IO = tracePrettyAtIO 2 dbg3IO :: (MonadIO m, Show a) => String -> a -> m () dbg3IO = tracePrettyAtIO 3 dbg4IO :: (MonadIO m, Show a) => String -> a -> m () dbg4IO = tracePrettyAtIO 4 dbg5IO :: (MonadIO m, Show a) => String -> a -> m () dbg5IO = tracePrettyAtIO 5 dbg6IO :: (MonadIO m, Show a) => String -> a -> m () dbg6IO = tracePrettyAtIO 6 dbg7IO :: (MonadIO m, Show a) => String -> a -> m () dbg7IO = tracePrettyAtIO 7 dbg8IO :: (MonadIO m, Show a) => String -> a -> m () dbg8IO = tracePrettyAtIO 8 dbg9IO :: (MonadIO m, Show a) => String -> a -> m () dbg9IO = tracePrettyAtIO 9 -- | Pretty-print a message and a showable value to the console if the debug level is at or above the specified level. -- dbtAt 0 always prints. Otherwise, uses unsafePerformIO. tracePrettyAt :: Show a => Int -> String -> a -> a tracePrettyAt lvl = dbgppshow lvl -- tracePrettyAtM :: (Monad m, Show a) => Int -> String -> a -> m a -- tracePrettyAtM lvl lbl x = tracePrettyAt lvl lbl x `seq` return x -- XXX Could not deduce (a ~ ()) -- from the context (Show a) -- bound by the type signature for -- dbgM :: Show a => String -> a -> IO () -- at hledger/Hledger/Cli/Main.hs:200:13-42 -- ‘a’ is a rigid type variable bound by -- the type signature for dbgM :: Show a => String -> a -> IO () -- at hledger/Hledger/Cli/Main.hs:200:13 -- Expected type: String -> a -> IO () -- Actual type: String -> a -> IO a tracePrettyAtIO :: (MonadIO m, Show a) => Int -> String -> a -> m () tracePrettyAtIO lvl lbl x = liftIO $ tracePrettyAt lvl lbl x `seq` return () -- | print this string to the console before evaluating the expression, -- if the global debug level is at or above the specified level. Uses unsafePerformIO. -- dbgtrace :: Int -> String -> a -> a -- dbgtrace level -- | debugLevel >= level = trace -- | otherwise = flip const -- | Print a showable value to the console, with a message, if the -- debug level is at or above the specified level (uses -- unsafePerformIO). -- Values are displayed with show, all on one line, which is hard to read. -- dbgshow :: Show a => Int -> String -> a -> a -- dbgshow level -- | debugLevel >= level = ltrace -- | otherwise = flip const -- | Print a showable value to the console, with a message, if the -- debug level is at or above the specified level (uses -- unsafePerformIO). 
-- Values are displayed with ppShow, each field/constructor on its own line. dbgppshow :: Show a => Int -> String -> a -> a dbgppshow level | level > 0 && debugLevel < level = flip const | otherwise = \s a -> let p = ppShow a ls = lines p nlorspace | length ls > 1 = "\n" | otherwise = " " ++ take (10 - length s) (repeat ' ') ls' | length ls > 1 = map (" "++) ls | otherwise = ls in trace (s++":"++nlorspace++intercalate "\n" ls') a -- -- | Print a showable value to the console, with a message, if the -- -- debug level is at or above the specified level (uses -- -- unsafePerformIO). -- -- Values are displayed with pprint. Field names are not shown, but the -- -- output is compact with smart line wrapping, long data elided, -- -- and slow calculations timed out. -- dbgpprint :: Data a => Int -> String -> a -> a -- dbgpprint level msg a -- | debugLevel >= level = unsafePerformIO $ do -- pprint a >>= putStrLn . ((msg++": \n") ++) . show -- return a -- | otherwise = a -- | Like dbg, then exit the program. Uses unsafePerformIO. dbgExit :: Show a => String -> a -> a dbgExit msg = const (unsafePerformIO exitFailure) . dbg0 msg -- | Print a message and parsec debug info (parse position and next -- input) to the console when the debug level is at or above -- this level. Uses unsafePerformIO. -- pdbgAt :: GenParser m => Float -> String -> m () pdbg :: Int -> String -> TextParser m () pdbg level msg = when (level <= debugLevel) $ ptrace msg -- | Like dbg, but writes the output to "debug.log" in the current directory. -- Uses unsafePerformIO. Can fail due to log file contention if called too quickly -- ("*** Exception: debug.log: openFile: resource busy (file is locked)"). dbglog :: Show a => String -> a -> a dbglog label a = (unsafePerformIO $ appendFile "debug.log" $ label ++ ": " ++ ppShow a ++ "\n") `seq` a hledger-lib-1.2/Hledger/Utils/Parse.hs0000644000000000000000000000511713035510426016003 0ustar0000000000000000{-# LANGUAGE FlexibleContexts, TypeFamilies #-} module Hledger.Utils.Parse where import Control.Monad.Except import Data.Char import Data.List import Data.Text (Text) import Text.Megaparsec hiding (State) import Data.Functor.Identity (Identity(..)) import Text.Printf import Control.Monad.State.Strict (StateT, evalStateT) import Hledger.Data.Types import Hledger.Utils.UTF8IOCompat (error') -- | A parser of strict text with generic user state, monad and return type. type TextParser m a = ParsecT Dec Text m a type JournalStateParser m a = StateT Journal (ParsecT Dec Text m) a type JournalParser a = StateT Journal (ParsecT Dec Text Identity) a -- | A journal parser that runs in IO and can throw an error mid-parse. type ErroringJournalParser m a = StateT Journal (ParsecT Dec Text (ExceptT String m)) a -- | Backtracking choice, use this when alternatives share a prefix. -- Consumes no input if all choices fail. choice' :: [TextParser m a] -> TextParser m a choice' = choice . map Text.Megaparsec.try -- | Backtracking choice, use this when alternatives share a prefix. -- Consumes no input if all choices fail. choiceInState :: [StateT s (ParsecT Dec Text m) a] -> StateT s (ParsecT Dec Text m) a choiceInState = choice . 
map Text.Megaparsec.try parsewith :: Parsec e Text a -> Text -> Either (ParseError Char e) a parsewith p = runParser p "" parsewithString :: Parsec e String a -> String -> Either (ParseError Char e) a parsewithString p = runParser p "" parseWithState :: Monad m => st -> StateT st (ParsecT Dec Text m) a -> Text -> m (Either (ParseError Char Dec) a) parseWithState ctx p s = runParserT (evalStateT p ctx) "" s parseWithState' :: (Stream s, ErrorComponent e) => st -> StateT st (ParsecT e s Identity) a -> s -> (Either (ParseError (Token s) e) a) parseWithState' ctx p s = runParser (evalStateT p ctx) "" s fromparse :: (Show t, Show e) => Either (ParseError t e) a -> a fromparse = either parseerror id parseerror :: (Show t, Show e) => ParseError t e -> a parseerror e = error' $ showParseError e showParseError :: (Show t, Show e) => ParseError t e -> String showParseError e = "parse error at " ++ show e showDateParseError :: (Show t, Show e) => ParseError t e -> String showDateParseError e = printf "date parse error (%s)" (intercalate ", " $ tail $ lines $ show e) nonspace :: TextParser m Char nonspace = satisfy (not . isSpace) spacenonewline :: (Stream s, Char ~ Token s) => ParsecT Dec s m Char spacenonewline = satisfy (`elem` " \v\f\t") restofline :: TextParser m String restofline = anyChar `manyTill` newline eolof :: TextParser m () eolof = (newline >> return ()) <|> eof hledger-lib-1.2/Hledger/Utils/Regex.hs0000644000000000000000000001142413035210046015774 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {-| Easy regular expression helpers, currently based on regex-tdfa. These should: - be cross-platform, not requiring C libraries - support unicode - support extended regular expressions - support replacement, with backreferences etc. - support splitting - have mnemonic names - have simple monomorphic types - work with simple strings Regex strings are automatically compiled into regular expressions the first time they are seen, and these are cached. If you use a huge number of unique regular expressions this might lead to increased memory usage. Several functions have memoised variants (*Memo), which also trade space for time. Current limitations: - (?i) and similar are not supported -} module Hledger.Utils.Regex ( -- * type aliases Regexp ,Replacement -- * standard regex operations ,regexMatches ,regexMatchesCI ,regexReplace ,regexReplaceCI ,regexReplaceMemo ,regexReplaceCIMemo ,regexReplaceBy ,regexReplaceByCI ) where import Data.Array import Data.Char import Data.List (foldl') import Data.MemoUgly (memo) import Text.Regex.TDFA ( Regex, CompOption(..), ExecOption(..), defaultCompOpt, defaultExecOpt, makeRegexOpts, AllMatches(getAllMatches), match, (=~), MatchText ) -- import Hledger.Utils.Debug import Hledger.Utils.UTF8IOCompat (error') -- | Regular expression. Extended regular expression-ish syntax ? But does not support eg (?i) syntax. type Regexp = String -- | A replacement pattern. May include numeric backreferences (\N). type Replacement = String -- | Convert our string-based regexps to real ones. Can fail if the -- string regexp is malformed. 
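-- The compiled Regex is cached (memoised) per pattern string, so eg repeated
-- calls such as regexReplace "[0-9]+" "N" s with the same pattern compile it
-- only once; only the matching and replacing work is repeated.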
toRegex :: Regexp -> Regex toRegex = memo (makeRegexOpts compOpt execOpt) toRegexCI :: Regexp -> Regex toRegexCI = memo (makeRegexOpts compOpt{caseSensitive=False} execOpt) compOpt :: CompOption compOpt = defaultCompOpt execOpt :: ExecOption execOpt = defaultExecOpt -- regexMatch' :: RegexContext Regexp String a => Regexp -> String -> a -- regexMatch' r s = s =~ (toRegex r) regexMatches :: Regexp -> String -> Bool regexMatches = flip (=~) regexMatchesCI :: Regexp -> String -> Bool regexMatchesCI r = match (toRegexCI r) -- | Replace all occurrences of the regexp, transforming each match with the given function. regexReplaceBy :: Regexp -> (String -> String) -> String -> String regexReplaceBy r = replaceAllBy (toRegex r) regexReplaceByCI :: Regexp -> (String -> String) -> String -> String regexReplaceByCI r = replaceAllBy (toRegexCI r) -- | Replace all occurrences of the regexp with the replacement -- pattern. The replacement pattern supports numeric backreferences -- (\N) but no other RE syntax. regexReplace :: Regexp -> Replacement -> String -> String regexReplace re = replaceRegex (toRegex re) regexReplaceCI :: Regexp -> Replacement -> String -> String regexReplaceCI re = replaceRegex (toRegexCI re) -- | A memoising version of regexReplace. Caches the result for each -- search pattern, replacement pattern, target string tuple. regexReplaceMemo :: Regexp -> Replacement -> String -> String regexReplaceMemo re repl = memo (regexReplace re repl) regexReplaceCIMemo :: Regexp -> Replacement -> String -> String regexReplaceCIMemo re repl = memo (regexReplaceCI re repl) -- replaceRegex :: Regex -> Replacement -> String -> String replaceRegex re repl s = foldl (replaceMatch repl) s (reverse $ match re s :: [MatchText String]) replaceMatch :: Replacement -> String -> MatchText String -> String replaceMatch replpat s matchgroups = pre ++ repl ++ post where ((_,(off,len)):_) = elems matchgroups -- groups should have 0-based indexes, and there should always be at least one, since this is a match (pre, post') = splitAt off s post = drop len post' repl = replaceAllBy (toRegex "\\\\[0-9]+") (replaceBackReference matchgroups) replpat replaceBackReference :: MatchText String -> String -> String replaceBackReference grps ('\\':s@(_:_)) | all isDigit s = case read s of n | n `elem` indices grps -> fst (grps ! n) _ -> error' $ "no match group exists for backreference \"\\"++s++"\"" replaceBackReference _ s = error' $ "replaceBackReference called on non-numeric-backreference \""++s++"\", shouldn't happen" -- -- http://stackoverflow.com/questions/9071682/replacement-substition-with-haskell-regex-libraries : -- | Replace all occurrences of a regexp in a string, transforming each match with the given function. replaceAllBy :: Regex -> (String -> String) -> String -> String replaceAllBy re f s = start end where (_, end, start) = foldl' go (0, s, id) $ (getAllMatches $ match re s :: [(Int, Int)]) go (ind,read,write) (off,len) = let (skip, start) = splitAt (off - ind) read (matched, remaining) = splitAt len start in (off + len, remaining, write . (skip++) . (f matched ++)) hledger-lib-1.2/Hledger/Utils/String.hs0000644000000000000000000003340013035510426016173 0ustar0000000000000000-- | String formatting helpers, starting to get a bit out of control. 
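-- Some quick examples of the single-line helpers below:
-- elideRight 8 "hello world" = "hello ..", padleft 5 "ab" = "   ab".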
module Hledger.Utils.String ( -- * misc lowercase, uppercase, underline, stripbrackets, unbracket, -- quoting quoteIfNeeded, singleQuoteIfNeeded, -- quotechars, -- whitespacechars, escapeQuotes, words', unwords', -- * single-line layout strip, lstrip, rstrip, chomp, elideLeft, elideRight, formatString, -- * multi-line layout concatTopPadded, concatBottomPadded, concatOneLine, vConcatLeftAligned, vConcatRightAligned, padtop, padbottom, padleft, padright, cliptopleft, fitto, -- * wide-character-aware layout charWidth, strWidth, takeWidth, fitString, fitStringMulti, padLeftWide, padRightWide ) where import Data.Char import Data.List import Text.Megaparsec import Text.Printf (printf) import Hledger.Utils.Parse import Hledger.Utils.Regex lowercase, uppercase :: String -> String lowercase = map toLower uppercase = map toUpper -- | Remove leading and trailing whitespace. strip :: String -> String strip = lstrip . rstrip -- | Remove leading whitespace. lstrip :: String -> String lstrip = dropWhile isSpace -- | Remove trailing whitespace. rstrip :: String -> String rstrip = reverse . lstrip . reverse -- | Remove trailing newlines/carriage returns. chomp :: String -> String chomp = reverse . dropWhile (`elem` "\r\n") . reverse stripbrackets :: String -> String stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String elideLeft :: Int -> String -> String elideLeft width s = if length s > width then ".." ++ reverse (take (width - 2) $ reverse s) else s elideRight :: Int -> String -> String elideRight width s = if length s > width then take (width - 2) s ++ ".." else s -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- Works on multi-line strings too (but will rewrite non-unix line endings). formatString :: Bool -> Maybe Int -> Maybe Int -> String -> String formatString leftJustified minwidth maxwidth s = intercalate "\n" $ map (printf fmt) $ lines s where justify = if leftJustified then "-" else "" minwidth' = maybe "" show minwidth maxwidth' = maybe "" (("."++).show) maxwidth fmt = "%" ++ justify ++ minwidth' ++ maxwidth' ++ "s" underline :: String -> String underline s = s' ++ replicate (length s) '-' ++ "\n" where s' | last s == '\n' = s | otherwise = s ++ "\n" -- | Double-quote this string if it contains whitespace, single quotes -- or double-quotes, escaping the quotes as needed. quoteIfNeeded :: String -> String quoteIfNeeded s | any (`elem` s) (quotechars++whitespacechars) = "\"" ++ escapeDoubleQuotes s ++ "\"" | otherwise = s -- | Single-quote this string if it contains whitespace or double-quotes. -- No good for strings containing single quotes. singleQuoteIfNeeded :: String -> String singleQuoteIfNeeded s | any (`elem` s) whitespacechars = "'"++s++"'" | otherwise = s quotechars, whitespacechars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" escapeDoubleQuotes :: String -> String escapeDoubleQuotes = regexReplace "\"" "\"" escapeQuotes :: String -> String escapeQuotes = regexReplace "([\"'])" "\\1" -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. 
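-- Eg words' "a 'b c' d" = ["a","b c","d"].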
words' :: String -> [String] words' "" = [] words' s = map stripquotes $ fromparse $ parsewithString p s where p = do ss <- (singleQuotedPattern <|> doubleQuotedPattern <|> pattern) `sepBy` some spacenonewline -- eof return ss pattern = many (noneOf whitespacechars) singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- | Quote-aware version of unwords - single-quote strings which contain whitespace unwords' :: [String] -> String unwords' = unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. stripquotes :: String -> String stripquotes s = if isSingleQuoted s || isDoubleQuoted s then init $ tail s else s isSingleQuoted s@(_:_:_) = head s == '\'' && last s == '\'' isSingleQuoted _ = False isDoubleQuoted s@(_:_:_) = head s == '"' && last s == '"' isDoubleQuoted _ = False unbracket :: String -> String unbracket s | (head s == '[' && last s == ']') || (head s == '(' && last s == ')') = init $ tail s | otherwise = s -- | Join several multi-line strings as side-by-side rectangular strings of the same height, top-padded. -- Treats wide characters as double width. concatTopPadded :: [String] -> String concatTopPadded strs = intercalate "\n" $ map concat $ transpose padded where lss = map lines strs h = maximum $ map length lss ypad ls = replicate (difforzero h (length ls)) "" ++ ls xpad ls = map (padLeftWide w) ls where w | null ls = 0 | otherwise = maximum $ map strWidth ls padded = map (xpad . ypad) lss -- | Join several multi-line strings as side-by-side rectangular strings of the same height, bottom-padded. -- Treats wide characters as double width. concatBottomPadded :: [String] -> String concatBottomPadded strs = intercalate "\n" $ map concat $ transpose padded where lss = map lines strs h = maximum $ map length lss ypad ls = ls ++ replicate (difforzero h (length ls)) "" xpad ls = map (padRightWide w) ls where w | null ls = 0 | otherwise = maximum $ map strWidth ls padded = map (xpad . ypad) lss -- | Join multi-line strings horizontally, after compressing each of -- them to a single line with a comma and space between each original line. concatOneLine :: [String] -> String concatOneLine strs = concat $ map ((intercalate ", ").lines) strs -- | Join strings vertically, left-aligned and right-padded. vConcatLeftAligned :: [String] -> String vConcatLeftAligned ss = intercalate "\n" $ map showfixedwidth ss where showfixedwidth = printf (printf "%%-%ds" width) width = maximum $ map length ss -- | Join strings vertically, right-aligned and left-padded. vConcatRightAligned :: [String] -> String vConcatRightAligned ss = intercalate "\n" $ map showfixedwidth ss where showfixedwidth = printf (printf "%%%ds" width) width = maximum $ map length ss -- | Convert a multi-line string to a rectangular string top-padded to the specified height. padtop :: Int -> String -> String padtop h s = intercalate "\n" xpadded where ls = lines s sh = length ls sw | null ls = 0 | otherwise = maximum $ map length ls ypadded = replicate (difforzero h sh) "" ++ ls xpadded = map (padleft sw) ypadded -- | Convert a multi-line string to a rectangular string bottom-padded to the specified height. 
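-- Eg padbottom 3 "ab" = "ab\n  \n  " (two blank lines of the same width are added below).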
padbottom :: Int -> String -> String padbottom h s = intercalate "\n" xpadded where ls = lines s sh = length ls sw | null ls = 0 | otherwise = maximum $ map length ls ypadded = ls ++ replicate (difforzero h sh) "" xpadded = map (padleft sw) ypadded difforzero :: (Num a, Ord a) => a -> a -> a difforzero a b = maximum [(a - b), 0] -- | Convert a multi-line string to a rectangular string left-padded to the specified width. -- Treats wide characters as double width. padleft :: Int -> String -> String padleft w "" = concat $ replicate w " " padleft w s = intercalate "\n" $ map (printf (printf "%%%ds" w)) $ lines s -- | Convert a multi-line string to a rectangular string right-padded to the specified width. -- Treats wide characters as double width. padright :: Int -> String -> String padright w "" = concat $ replicate w " " padright w s = intercalate "\n" $ map (printf (printf "%%-%ds" w)) $ lines s -- | Clip a multi-line string to the specified width and height from the top left. cliptopleft :: Int -> Int -> String -> String cliptopleft w h = intercalate "\n" . take h . map (take w) . lines -- | Clip and pad a multi-line string to fill the specified width and height. fitto :: Int -> Int -> String -> String fitto w h s = intercalate "\n" $ take h $ rows ++ repeat blankline where rows = map (fit w) $ lines s fit w = take w . (++ repeat ' ') blankline = replicate w ' ' -- Functions below treat wide (eg CJK) characters as double-width. -- | General-purpose wide-char-aware single-line string layout function. -- It can left- or right-pad a short string to a minimum width. -- It can left- or right-clip a long string to a maximum width, optionally inserting an ellipsis (the third argument). -- It clips and pads on the right when the fourth argument is true, otherwise on the left. -- It treats wide characters as double width. fitString :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String fitString mminwidth mmaxwidth ellipsify rightside s = (clip . pad) s where clip :: String -> String clip s = case mmaxwidth of Just w | strWidth s > w -> case rightside of True -> takeWidth (w - length ellipsis) s ++ ellipsis False -> ellipsis ++ reverse (takeWidth (w - length ellipsis) $ reverse s) | otherwise -> s where ellipsis = if ellipsify then ".." else "" Nothing -> s pad :: String -> String pad s = case mminwidth of Just w | sw < w -> case rightside of True -> s ++ replicate (w - sw) ' ' False -> replicate (w - sw) ' ' ++ s | otherwise -> s Nothing -> s where sw = strWidth s -- | A version of fitString that works on multi-line strings, -- separate for now to avoid breakage. -- This will rewrite any line endings to unix newlines. fitStringMulti :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String fitStringMulti mminwidth mmaxwidth ellipsify rightside s = (intercalate "\n" . map (fitString mminwidth mmaxwidth ellipsify rightside) . lines) s -- | Left-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). padLeftWide :: Int -> String -> String padLeftWide w "" = replicate w ' ' padLeftWide w s = intercalate "\n" $ map (fitString (Just w) Nothing False False) $ lines s -- XXX not yet replaceable by -- padLeftWide w = fitStringMulti (Just w) Nothing False False -- | Right-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). 
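-- Eg padRightWide 4 "字" = "字  " (the wide character counts as two columns, so only two spaces are added).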
padRightWide :: Int -> String -> String padRightWide w "" = replicate w ' ' padRightWide w s = intercalate "\n" $ map (fitString (Just w) Nothing False True) $ lines s -- XXX not yet replaceable by -- padRightWide w = fitStringMulti (Just w) Nothing False True -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg takeWidth 3 "りんご" = "り". takeWidth :: Int -> String -> String takeWidth _ "" = "" takeWidth 0 _ = "" takeWidth w (c:cs) | cw <= w = c:takeWidth (w-cw) cs | otherwise = "" where cw = charWidth c -- from Pandoc (copyright John MacFarlane, GPL) -- see also http://unicode.org/reports/tr11/#Description -- | Calculate the designated render width of a string, taking into -- account wide characters and line breaks (the longest line within a -- multi-line string determines the width ). strWidth :: String -> Int strWidth "" = 0 strWidth s = maximum $ map (foldr (\a b -> charWidth a + b) 0) $ lines s -- | Get the designated render width of a character: 0 for a combining -- character, 1 for a regular character, 2 for a wide character. -- (Wide characters are rendered as exactly double width in apps and -- fonts that support it.) (From Pandoc.) charWidth :: Char -> Int charWidth c = case c of _ | c < '\x0300' -> 1 | c >= '\x0300' && c <= '\x036F' -> 0 -- combining | c >= '\x0370' && c <= '\x10FC' -> 1 | c >= '\x1100' && c <= '\x115F' -> 2 | c >= '\x1160' && c <= '\x11A2' -> 1 | c >= '\x11A3' && c <= '\x11A7' -> 2 | c >= '\x11A8' && c <= '\x11F9' -> 1 | c >= '\x11FA' && c <= '\x11FF' -> 2 | c >= '\x1200' && c <= '\x2328' -> 1 | c >= '\x2329' && c <= '\x232A' -> 2 | c >= '\x232B' && c <= '\x2E31' -> 1 | c >= '\x2E80' && c <= '\x303E' -> 2 | c == '\x303F' -> 1 | c >= '\x3041' && c <= '\x3247' -> 2 | c >= '\x3248' && c <= '\x324F' -> 1 -- ambiguous | c >= '\x3250' && c <= '\x4DBF' -> 2 | c >= '\x4DC0' && c <= '\x4DFF' -> 1 | c >= '\x4E00' && c <= '\xA4C6' -> 2 | c >= '\xA4D0' && c <= '\xA95F' -> 1 | c >= '\xA960' && c <= '\xA97C' -> 2 | c >= '\xA980' && c <= '\xABF9' -> 1 | c >= '\xAC00' && c <= '\xD7FB' -> 2 | c >= '\xD800' && c <= '\xDFFF' -> 1 | c >= '\xE000' && c <= '\xF8FF' -> 1 -- ambiguous | c >= '\xF900' && c <= '\xFAFF' -> 2 | c >= '\xFB00' && c <= '\xFDFD' -> 1 | c >= '\xFE00' && c <= '\xFE0F' -> 1 -- ambiguous | c >= '\xFE10' && c <= '\xFE19' -> 2 | c >= '\xFE20' && c <= '\xFE26' -> 1 | c >= '\xFE30' && c <= '\xFE6B' -> 2 | c >= '\xFE70' && c <= '\xFEFF' -> 1 | c >= '\xFF01' && c <= '\xFF60' -> 2 | c >= '\xFF61' && c <= '\x16A38' -> 1 | c >= '\x1B000' && c <= '\x1B001' -> 2 | c >= '\x1D000' && c <= '\x1F1FF' -> 1 | c >= '\x1F200' && c <= '\x1F251' -> 2 | c >= '\x1F300' && c <= '\x1F773' -> 1 | c >= '\x20000' && c <= '\x3FFFD' -> 2 | otherwise -> 1 hledger-lib-1.2/Hledger/Utils/Test.hs0000644000000000000000000000332113035210046015636 0ustar0000000000000000module Hledger.Utils.Test where import Test.HUnit import Text.Megaparsec -- | Get a Test's label, or the empty string. testName :: Test -> String testName (TestLabel n _) = n testName _ = "" -- | Flatten a Test containing TestLists into a list of single tests. flattenTests :: Test -> [Test] flattenTests (TestLabel _ t@(TestList _)) = flattenTests t flattenTests (TestList ts) = concatMap flattenTests ts flattenTests t = [t] -- | Filter TestLists in a Test, recursively, preserving the structure. 
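-- Eg filterTests ((=="amount") . testName) t keeps the nesting of t but drops any
-- TestList branch whose flattened tests include none labelled "amount".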
filterTests :: (Test -> Bool) -> Test -> Test filterTests p (TestLabel l ts) = TestLabel l (filterTests p ts) filterTests p (TestList ts) = TestList $ filter (any p . flattenTests) $ map (filterTests p) ts filterTests _ t = t -- | Simple way to assert something is some expected value, with no label. is :: (Eq a, Show a) => a -> a -> Assertion a `is` e = assertEqual "" e a -- | Assert a parse result is successful, printing the parse error on failure. assertParse :: (Show t, Show e) => (Either (ParseError t e) a) -> Assertion assertParse parse = either (assertFailure.show) (const (return ())) parse -- | Assert a parse result is successful, printing the parse error on failure. assertParseFailure :: (Either (ParseError t e) a) -> Assertion assertParseFailure parse = either (const $ return ()) (const $ assertFailure "parse should not have succeeded") parse -- | Assert a parse result is some expected value, printing the parse error on failure. assertParseEqual :: (Show a, Eq a, Show t, Show e) => (Either (ParseError t e) a) -> a -> Assertion assertParseEqual parse expected = either (assertFailure.show) (`is` expected) parse printParseError :: (Show a) => a -> IO () printParseError e = do putStr "parse error at "; print e hledger-lib-1.2/Hledger/Utils/Text.hs0000644000000000000000000003754613035210046015663 0ustar0000000000000000-- | Text formatting helpers, ported from String as needed. -- There may be better alternatives out there. {-# LANGUAGE OverloadedStrings #-} module Hledger.Utils.Text -- ( -- -- * misc -- lowercase, -- uppercase, -- underline, -- stripbrackets, -- unbracket, -- -- quoting -- quoteIfSpaced, -- quoteIfNeeded, -- singleQuoteIfNeeded, -- -- quotechars, -- -- whitespacechars, -- escapeDoubleQuotes, -- escapeSingleQuotes, -- escapeQuotes, -- words', -- unwords', -- stripquotes, -- isSingleQuoted, -- isDoubleQuoted, -- -- * single-line layout -- strip, -- lstrip, -- rstrip, -- chomp, -- elideLeft, -- elideRight, -- formatString, -- -- * multi-line layout -- concatTopPadded, -- concatBottomPadded, -- concatOneLine, -- vConcatLeftAligned, -- vConcatRightAligned, -- padtop, -- padbottom, -- padleft, -- padright, -- cliptopleft, -- fitto, -- -- * wide-character-aware layout -- strWidth, -- textTakeWidth, -- fitString, -- fitStringMulti, -- padLeftWide, -- padRightWide -- ) where -- import Data.Char import Data.List import Data.Monoid import Data.Text (Text) import qualified Data.Text as T -- import Text.Parsec -- import Text.Printf (printf) -- import Hledger.Utils.Parse -- import Hledger.Utils.Regex import Hledger.Utils.String (charWidth) -- lowercase, uppercase :: String -> String -- lowercase = map toLower -- uppercase = map toUpper -- | Remove leading and trailing whitespace. textstrip :: Text -> Text textstrip = textlstrip . textrstrip -- | Remove leading whitespace. textlstrip :: Text -> Text textlstrip = T.dropWhile (`elem` (" \t" :: String)) :: Text -> Text -- XXX isSpace ? -- | Remove trailing whitespace. textrstrip = T.reverse . textlstrip . T.reverse textrstrip :: Text -> Text -- -- | Remove trailing newlines/carriage returns. -- chomp :: String -> String -- chomp = reverse . dropWhile (`elem` "\r\n") . reverse -- stripbrackets :: String -> String -- stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String -- elideLeft :: Int -> String -> String -- elideLeft width s = -- if length s > width then ".." 
++ reverse (take (width - 2) $ reverse s) else s textElideRight :: Int -> Text -> Text textElideRight width t = if T.length t > width then T.take (width - 2) t <> ".." else t -- -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- -- Works on multi-line strings too (but will rewrite non-unix line endings). -- formatString :: Bool -> Maybe Int -> Maybe Int -> String -> String -- formatString leftJustified minwidth maxwidth s = intercalate "\n" $ map (printf fmt) $ lines s -- where -- justify = if leftJustified then "-" else "" -- minwidth' = maybe "" show minwidth -- maxwidth' = maybe "" (("."++).show) maxwidth -- fmt = "%" ++ justify ++ minwidth' ++ maxwidth' ++ "s" -- underline :: String -> String -- underline s = s' ++ replicate (length s) '-' ++ "\n" -- where s' -- | last s == '\n' = s -- | otherwise = s ++ "\n" -- | Wrap a string in double quotes, and \-prefix any embedded single -- quotes, if it contains whitespace and is not already single- or -- double-quoted. quoteIfSpaced :: T.Text -> T.Text quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s | not $ any (`elem` (T.unpack s)) whitespacechars = s | otherwise = "'"<>escapeSingleQuotes s<>"'" -- -- | Wrap a string in double quotes, and \-prefix any embedded single -- -- quotes, if it contains whitespace and is not already single- or -- -- double-quoted. -- quoteIfSpaced :: String -> String -- quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s -- | not $ any (`elem` s) whitespacechars = s -- | otherwise = "'"++escapeSingleQuotes s++"'" -- -- | Double-quote this string if it contains whitespace, single quotes -- -- or double-quotes, escaping the quotes as needed. -- quoteIfNeeded :: T.Text -> T.Text -- quoteIfNeeded s | any (`elem` T.unpack s) (quotechars++whitespacechars) = "\"" <> escapeDoubleQuotes s <> "\"" -- | otherwise = s -- -- | Single-quote this string if it contains whitespace or double-quotes. -- -- No good for strings containing single quotes. -- singleQuoteIfNeeded :: String -> String -- singleQuoteIfNeeded s | any (`elem` s) whitespacechars = "'"++s++"'" -- | otherwise = s quotechars, whitespacechars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" escapeDoubleQuotes :: T.Text -> T.Text escapeDoubleQuotes = T.replace "\"" "\"" escapeSingleQuotes :: T.Text -> T.Text escapeSingleQuotes = T.replace "'" "\'" -- escapeQuotes :: String -> String -- escapeQuotes = regexReplace "([\"'])" "\\1" -- -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. -- words' :: String -> [String] -- words' "" = [] -- words' s = map stripquotes $ fromparse $ parsewith p s -- where -- p = do ss <- (singleQuotedPattern <|> doubleQuotedPattern <|> pattern) `sepBy` many1 spacenonewline -- -- eof -- return ss -- pattern = many (noneOf whitespacechars) -- singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") -- doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- -- | Quote-aware version of unwords - single-quote strings which contain whitespace -- unwords' :: [Text] -> Text -- unwords' = T.unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. 
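-- Eg stripquotes "'a b'" = "a b"; a string without a matching pair of quotes is returned unchanged.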
stripquotes :: Text -> Text stripquotes s = if isSingleQuoted s || isDoubleQuoted s then T.init $ T.tail s else s isSingleQuoted :: Text -> Bool isSingleQuoted s = T.length (T.take 2 s) == 2 && T.head s == '\'' && T.last s == '\'' isDoubleQuoted :: Text -> Bool isDoubleQuoted s = T.length (T.take 2 s) == 2 && T.head s == '"' && T.last s == '"' textUnbracket :: Text -> Text textUnbracket s | (T.head s == '[' && T.last s == ']') || (T.head s == '(' && T.last s == ')') = T.init $ T.tail s | otherwise = s -- | Join several multi-line strings as side-by-side rectangular strings of the same height, top-padded. -- Treats wide characters as double width. textConcatTopPadded :: [Text] -> Text textConcatTopPadded ts = T.intercalate "\n" $ map T.concat $ transpose padded where lss = map T.lines ts :: [[Text]] h = maximum $ map length lss ypad ls = replicate (difforzero h (length ls)) "" ++ ls xpad ls = map (textPadLeftWide w) ls where w | null ls = 0 | otherwise = maximum $ map textWidth ls padded = map (xpad . ypad) lss :: [[Text]] -- -- | Join several multi-line strings as side-by-side rectangular strings of the same height, bottom-padded. -- -- Treats wide characters as double width. -- concatBottomPadded :: [String] -> String -- concatBottomPadded strs = intercalate "\n" $ map concat $ transpose padded -- where -- lss = map lines strs -- h = maximum $ map length lss -- ypad ls = ls ++ replicate (difforzero h (length ls)) "" -- xpad ls = map (padRightWide w) ls where w | null ls = 0 -- | otherwise = maximum $ map strWidth ls -- padded = map (xpad . ypad) lss -- -- | Join multi-line strings horizontally, after compressing each of -- -- them to a single line with a comma and space between each original line. -- concatOneLine :: [String] -> String -- concatOneLine strs = concat $ map ((intercalate ", ").lines) strs -- -- | Join strings vertically, left-aligned and right-padded. -- vConcatLeftAligned :: [String] -> String -- vConcatLeftAligned ss = intercalate "\n" $ map showfixedwidth ss -- where -- showfixedwidth = printf (printf "%%-%ds" width) -- width = maximum $ map length ss -- -- | Join strings vertically, right-aligned and left-padded. -- vConcatRightAligned :: [String] -> String -- vConcatRightAligned ss = intercalate "\n" $ map showfixedwidth ss -- where -- showfixedwidth = printf (printf "%%%ds" width) -- width = maximum $ map length ss -- -- | Convert a multi-line string to a rectangular string top-padded to the specified height. -- padtop :: Int -> String -> String -- padtop h s = intercalate "\n" xpadded -- where -- ls = lines s -- sh = length ls -- sw | null ls = 0 -- | otherwise = maximum $ map length ls -- ypadded = replicate (difforzero h sh) "" ++ ls -- xpadded = map (padleft sw) ypadded -- -- | Convert a multi-line string to a rectangular string bottom-padded to the specified height. -- padbottom :: Int -> String -> String -- padbottom h s = intercalate "\n" xpadded -- where -- ls = lines s -- sh = length ls -- sw | null ls = 0 -- | otherwise = maximum $ map length ls -- ypadded = ls ++ replicate (difforzero h sh) "" -- xpadded = map (padleft sw) ypadded difforzero :: (Num a, Ord a) => a -> a -> a difforzero a b = maximum [(a - b), 0] -- -- | Convert a multi-line string to a rectangular string left-padded to the specified width. -- -- Treats wide characters as double width. 
-- padleft :: Int -> String -> String -- padleft w "" = concat $ replicate w " " -- padleft w s = intercalate "\n" $ map (printf (printf "%%%ds" w)) $ lines s -- -- | Convert a multi-line string to a rectangular string right-padded to the specified width. -- -- Treats wide characters as double width. -- padright :: Int -> String -> String -- padright w "" = concat $ replicate w " " -- padright w s = intercalate "\n" $ map (printf (printf "%%-%ds" w)) $ lines s -- -- | Clip a multi-line string to the specified width and height from the top left. -- cliptopleft :: Int -> Int -> String -> String -- cliptopleft w h = intercalate "\n" . take h . map (take w) . lines -- -- | Clip and pad a multi-line string to fill the specified width and height. -- fitto :: Int -> Int -> String -> String -- fitto w h s = intercalate "\n" $ take h $ rows ++ repeat blankline -- where -- rows = map (fit w) $ lines s -- fit w = take w . (++ repeat ' ') -- blankline = replicate w ' ' -- -- Functions below treat wide (eg CJK) characters as double-width. -- | General-purpose wide-char-aware single-line text layout function. -- It can left- or right-pad a short string to a minimum width. -- It can left- or right-clip a long string to a maximum width, optionally inserting an ellipsis (the third argument). -- It clips and pads on the right when the fourth argument is true, otherwise on the left. -- It treats wide characters as double width. fitText :: Maybe Int -> Maybe Int -> Bool -> Bool -> Text -> Text fitText mminwidth mmaxwidth ellipsify rightside s = (clip . pad) s where clip :: Text -> Text clip s = case mmaxwidth of Just w | textWidth s > w -> case rightside of True -> textTakeWidth (w - T.length ellipsis) s <> ellipsis False -> ellipsis <> T.reverse (textTakeWidth (w - T.length ellipsis) $ T.reverse s) | otherwise -> s where ellipsis = if ellipsify then ".." else "" Nothing -> s pad :: Text -> Text pad s = case mminwidth of Just w | sw < w -> case rightside of True -> s <> T.replicate (w - sw) " " False -> T.replicate (w - sw) " " <> s | otherwise -> s Nothing -> s where sw = textWidth s -- -- | A version of fitString that works on multi-line strings, -- -- separate for now to avoid breakage. -- -- This will rewrite any line endings to unix newlines. -- fitStringMulti :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String -- fitStringMulti mminwidth mmaxwidth ellipsify rightside s = -- (intercalate "\n" . map (fitString mminwidth mmaxwidth ellipsify rightside) . lines) s -- | Left-pad a text to the specified width. -- Treats wide characters as double width. -- Works on multi-line texts too (but will rewrite non-unix line endings). textPadLeftWide :: Int -> Text -> Text textPadLeftWide w "" = T.replicate w " " textPadLeftWide w s = T.intercalate "\n" $ map (fitText (Just w) Nothing False False) $ T.lines s -- XXX not yet replaceable by -- padLeftWide w = fitStringMulti (Just w) Nothing False False -- | Right-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). textPadRightWide :: Int -> Text -> Text textPadRightWide w "" = T.replicate w " " textPadRightWide w s = T.intercalate "\n" $ map (fitText (Just w) Nothing False True) $ T.lines s -- XXX not yet replaceable by -- padRightWide w = fitStringMulti (Just w) Nothing False True -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg textTakeWidth 3 "りんご" = "り". 
textTakeWidth :: Int -> Text -> Text textTakeWidth _ "" = "" textTakeWidth 0 _ = "" textTakeWidth w t | not (T.null t), let c = T.head t, let cw = charWidth c, cw <= w = T.cons c $ textTakeWidth (w-cw) (T.tail t) | otherwise = "" -- -- from Pandoc (copyright John MacFarlane, GPL) -- -- see also http://unicode.org/reports/tr11/#Description -- | Calculate the designated render width of a string, taking into -- account wide characters and line breaks (the longest line within a -- multi-line string determines the width ). textWidth :: Text -> Int textWidth "" = 0 textWidth s = maximum $ map (T.foldr (\a b -> charWidth a + b) 0) $ T.lines s -- -- | Get the designated render width of a character: 0 for a combining -- -- character, 1 for a regular character, 2 for a wide character. -- -- (Wide characters are rendered as exactly double width in apps and -- -- fonts that support it.) (From Pandoc.) -- charWidth :: Char -> Int -- charWidth c = -- case c of -- _ | c < '\x0300' -> 1 -- | c >= '\x0300' && c <= '\x036F' -> 0 -- combining -- | c >= '\x0370' && c <= '\x10FC' -> 1 -- | c >= '\x1100' && c <= '\x115F' -> 2 -- | c >= '\x1160' && c <= '\x11A2' -> 1 -- | c >= '\x11A3' && c <= '\x11A7' -> 2 -- | c >= '\x11A8' && c <= '\x11F9' -> 1 -- | c >= '\x11FA' && c <= '\x11FF' -> 2 -- | c >= '\x1200' && c <= '\x2328' -> 1 -- | c >= '\x2329' && c <= '\x232A' -> 2 -- | c >= '\x232B' && c <= '\x2E31' -> 1 -- | c >= '\x2E80' && c <= '\x303E' -> 2 -- | c == '\x303F' -> 1 -- | c >= '\x3041' && c <= '\x3247' -> 2 -- | c >= '\x3248' && c <= '\x324F' -> 1 -- ambiguous -- | c >= '\x3250' && c <= '\x4DBF' -> 2 -- | c >= '\x4DC0' && c <= '\x4DFF' -> 1 -- | c >= '\x4E00' && c <= '\xA4C6' -> 2 -- | c >= '\xA4D0' && c <= '\xA95F' -> 1 -- | c >= '\xA960' && c <= '\xA97C' -> 2 -- | c >= '\xA980' && c <= '\xABF9' -> 1 -- | c >= '\xAC00' && c <= '\xD7FB' -> 2 -- | c >= '\xD800' && c <= '\xDFFF' -> 1 -- | c >= '\xE000' && c <= '\xF8FF' -> 1 -- ambiguous -- | c >= '\xF900' && c <= '\xFAFF' -> 2 -- | c >= '\xFB00' && c <= '\xFDFD' -> 1 -- | c >= '\xFE00' && c <= '\xFE0F' -> 1 -- ambiguous -- | c >= '\xFE10' && c <= '\xFE19' -> 2 -- | c >= '\xFE20' && c <= '\xFE26' -> 1 -- | c >= '\xFE30' && c <= '\xFE6B' -> 2 -- | c >= '\xFE70' && c <= '\xFEFF' -> 1 -- | c >= '\xFF01' && c <= '\xFF60' -> 2 -- | c >= '\xFF61' && c <= '\x16A38' -> 1 -- | c >= '\x1B000' && c <= '\x1B001' -> 2 -- | c >= '\x1D000' && c <= '\x1F1FF' -> 1 -- | c >= '\x1F200' && c <= '\x1F251' -> 2 -- | c >= '\x1F300' && c <= '\x1F773' -> 1 -- | c >= '\x20000' && c <= '\x3FFFD' -> 2 -- | otherwise -> 1 hledger-lib-1.2/Hledger/Utils/Tree.hs0000644000000000000000000000525613035210046015627 0ustar0000000000000000module Hledger.Utils.Tree where -- import Data.Char import Data.List (foldl') import qualified Data.Map as M import Data.Tree -- import Text.Megaparsec -- import Text.Printf import Hledger.Utils.Regex -- import Hledger.Utils.UTF8IOCompat (error') -- standard tree helpers root = rootLabel subs = subForest branches = subForest -- | List just the leaf nodes of a tree leaves :: Tree a -> [a] leaves (Node v []) = [v] leaves (Node _ branches) = concatMap leaves branches -- | get the sub-tree rooted at the first (left-most, depth-first) occurrence -- of the specified node value subtreeat :: Eq a => a -> Tree a -> Maybe (Tree a) subtreeat v t | root t == v = Just t | otherwise = subtreeinforest v $ subs t -- | get the sub-tree for the specified node value in the first tree in -- forest in which it occurs. 
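-- Eg subtreeinforest 'b' [Node 'a' [Node 'b' []], Node 'b' [Node 'c' []]] = Just (Node 'b' [])
-- (the leftmost, depth-first occurrence wins).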
subtreeinforest :: Eq a => a -> [Tree a] -> Maybe (Tree a) subtreeinforest _ [] = Nothing subtreeinforest v (t:ts) = case (subtreeat v t) of Just t' -> Just t' Nothing -> subtreeinforest v ts -- | remove all nodes past a certain depth treeprune :: Int -> Tree a -> Tree a treeprune 0 t = Node (root t) [] treeprune d t = Node (root t) (map (treeprune $ d-1) $ branches t) -- | apply f to all tree nodes treemap :: (a -> b) -> Tree a -> Tree b treemap f t = Node (f $ root t) (map (treemap f) $ branches t) -- | remove all subtrees whose nodes do not fulfill predicate treefilter :: (a -> Bool) -> Tree a -> Tree a treefilter f t = Node (root t) (map (treefilter f) $ filter (treeany f) $ branches t) -- | is predicate true in any node of tree ? treeany :: (a -> Bool) -> Tree a -> Bool treeany f t = f (root t) || any (treeany f) (branches t) -- treedrop -- remove the leaves which do fulfill predicate. -- treedropall -- do this repeatedly. -- | show a compact ascii representation of a tree showtree :: Show a => Tree a -> String showtree = unlines . filter (regexMatches "[^ \\|]") . lines . drawTree . treemap show -- | show a compact ascii representation of a forest showforest :: Show a => Forest a -> String showforest = concatMap showtree -- | An efficient-to-build tree suggested by Cale Gibbard, probably -- better than accountNameTreeFrom. newtype FastTree a = T (M.Map a (FastTree a)) deriving (Show, Eq, Ord) emptyTree = T M.empty mergeTrees :: (Ord a) => FastTree a -> FastTree a -> FastTree a mergeTrees (T m) (T m') = T (M.unionWith mergeTrees m m') treeFromPath :: [a] -> FastTree a treeFromPath [] = T M.empty treeFromPath (x:xs) = T (M.singleton x (treeFromPath xs)) treeFromPaths :: (Ord a) => [[a]] -> FastTree a treeFromPaths = foldl' mergeTrees emptyTree . map treeFromPath hledger-lib-1.2/Hledger/Utils/UTF8IOCompat.hs0000644000000000000000000001024513066746043017063 0ustar0000000000000000{-# LANGUAGE CPP #-} {- | UTF-8 aware string IO functions that will work across multiple platforms and GHC versions. Includes code from Text.Pandoc.UTF8 ((C) 2010 John MacFarlane). Example usage: import Prelude hiding (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import UTF8IOCompat (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import UTF8IOCompat (SystemString,fromSystemString,toSystemString,error',userError') 2013/4/10 update: we now trust that current GHC versions & platforms do the right thing, so this file is a no-op and on its way to being removed. Not carefully tested. -} -- TODO obsolete ? module Hledger.Utils.UTF8IOCompat ( readFile, writeFile, appendFile, getContents, hGetContents, putStr, putStrLn, hPutStr, hPutStrLn, -- SystemString, fromSystemString, toSystemString, error', userError', usageError, ) where -- import Control.Monad (liftM) -- import qualified Data.ByteString.Lazy as B -- import qualified Data.ByteString.Lazy.Char8 as B8 -- import qualified Data.ByteString.Lazy.UTF8 as U8 (toString, fromString) import Prelude hiding (readFile, writeFile, appendFile, getContents, putStr, putStrLn) import System.IO -- (Handle) -- #if __GLASGOW_HASKELL__ < 702 -- import Codec.Binary.UTF8.String as UTF8 (decodeString, encodeString, isUTF8Encoded) -- import System.Info (os) -- #endif -- bom :: B.ByteString -- bom = B.pack [0xEF, 0xBB, 0xBF] -- stripBOM :: B.ByteString -> B.ByteString -- stripBOM s | bom `B.isPrefixOf` s = B.drop 3 s -- stripBOM s = s -- readFile :: FilePath -> IO String -- readFile = liftM (U8.toString . stripBOM) . 
B.readFile -- writeFile :: FilePath -> String -> IO () -- writeFile f = B.writeFile f . U8.fromString -- appendFile :: FilePath -> String -> IO () -- appendFile f = B.appendFile f . U8.fromString -- getContents :: IO String -- getContents = liftM (U8.toString . stripBOM) B.getContents -- hGetContents :: Handle -> IO String -- hGetContents h = liftM (U8.toString . stripBOM) (B.hGetContents h) -- putStr :: String -> IO () -- putStr = bs_putStr . U8.fromString -- putStrLn :: String -> IO () -- putStrLn = bs_putStrLn . U8.fromString -- hPutStr :: Handle -> String -> IO () -- hPutStr h = bs_hPutStr h . U8.fromString -- hPutStrLn :: Handle -> String -> IO () -- hPutStrLn h = bs_hPutStrLn h . U8.fromString -- -- span GHC versions including 6.12.3 - 7.4.1: -- bs_putStr = B8.putStr -- bs_putStrLn = B8.putStrLn -- bs_hPutStr = B8.hPut -- bs_hPutStrLn h bs = B8.hPut h bs >> B8.hPut h (B.singleton 0x0a) -- | A string received from or being passed to the operating system, such -- as a file path, command-line argument, or environment variable name or -- value. With GHC versions before 7.2 on some platforms (posix) these are -- typically encoded. When converting, we assume the encoding is UTF-8 (cf -- ). type SystemString = String -- | Convert a system string to an ordinary string, decoding from UTF-8 if -- it appears to be UTF8-encoded and GHC version is less than 7.2. fromSystemString :: SystemString -> String -- #if __GLASGOW_HASKELL__ < 702 -- fromSystemString s = if UTF8.isUTF8Encoded s then UTF8.decodeString s else s -- #else fromSystemString = id -- #endif -- | Convert a unicode string to a system string, encoding with UTF-8 if -- we are on a posix platform with GHC < 7.2. toSystemString :: String -> SystemString -- #if __GLASGOW_HASKELL__ < 702 -- toSystemString = case os of -- "unix" -> UTF8.encodeString -- "linux" -> UTF8.encodeString -- "darwin" -> UTF8.encodeString -- _ -> id -- #else toSystemString = id -- #endif -- | A SystemString-aware version of error. error' :: String -> a error' = #if __GLASGOW_HASKELL__ < 800 -- (easier than if base < 4.9) error . toSystemString #else errorWithoutStackTrace . toSystemString #endif -- | A SystemString-aware version of userError. userError' :: String -> IOError userError' = userError . toSystemString -- | A SystemString-aware version of error that adds a usage hint. usageError :: String -> a usageError = error' . (++ " (use -h to see usage)") hledger-lib-1.2/tests/doctests.hs0000644000000000000000000000046213067266175015265 0ustar0000000000000000{-# LANGUAGE PackageImports #-} import Data.List import "Glob" System.FilePath.Glob import Test.DocTest main = do fs1 <- filter (not . isInfixOf "/.") <$> glob "Hledger/**/*.hs" -- fs2 <- filter (not . isInfixOf "/.") <$> glob "other/ledger-parse/**/*.hs" doctest $ ["Hledger.hs"] ++ fs1 -- ++ fs2 hledger-lib-1.2/Hledger.hs0000644000000000000000000000066013035210046013622 0ustar0000000000000000module Hledger ( module X ,tests_Hledger ) where import Test.HUnit import Hledger.Data as X import Hledger.Query as X import Hledger.Read as X hiding (samplejournal) import Hledger.Reports as X import Hledger.Utils as X tests_Hledger = TestList [ tests_Hledger_Data ,tests_Hledger_Query ,tests_Hledger_Read ,tests_Hledger_Reports ] hledger-lib-1.2/Hledger/Data.hs0000644000000000000000000000372413066173044014511 0ustar0000000000000000{-| The Hledger.Data library allows parsing and querying of C++ ledger-style journal files. It generally provides a compatible subset of C++ ledger's functionality. 
This package re-exports all the Hledger.Data.* modules (except UTF8, which requires an explicit import.) -} module Hledger.Data ( module Hledger.Data.Account, module Hledger.Data.AccountName, module Hledger.Data.Amount, module Hledger.Data.Commodity, module Hledger.Data.Dates, module Hledger.Data.Journal, module Hledger.Data.Ledger, module Hledger.Data.MarketPrice, module Hledger.Data.Period, module Hledger.Data.Posting, module Hledger.Data.RawOptions, module Hledger.Data.StringFormat, module Hledger.Data.Timeclock, module Hledger.Data.Transaction, module Hledger.Data.Types, tests_Hledger_Data ) where import Test.HUnit import Hledger.Data.Account import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Commodity import Hledger.Data.Dates import Hledger.Data.Journal import Hledger.Data.Ledger import Hledger.Data.MarketPrice import Hledger.Data.Period import Hledger.Data.Posting import Hledger.Data.RawOptions import Hledger.Data.StringFormat import Hledger.Data.Timeclock import Hledger.Data.Transaction import Hledger.Data.Types tests_Hledger_Data :: Test tests_Hledger_Data = TestList [ tests_Hledger_Data_Account ,tests_Hledger_Data_AccountName ,tests_Hledger_Data_Amount ,tests_Hledger_Data_Commodity ,tests_Hledger_Data_Journal ,tests_Hledger_Data_MarketPrice ,tests_Hledger_Data_Ledger ,tests_Hledger_Data_Posting -- ,tests_Hledger_Data_RawOptions -- ,tests_Hledger_Data_StringFormat ,tests_Hledger_Data_Timeclock ,tests_Hledger_Data_Transaction -- ,tests_Hledger_Data_Types ] hledger-lib-1.2/Hledger/Data/Account.hs0000644000000000000000000001606313042200120016061 0ustar0000000000000000{-# LANGUAGE RecordWildCards, StandaloneDeriving, OverloadedStrings #-} {-| An 'Account' has a name, a list of subaccounts, an optional parent account, and subaccounting-excluding and -including balances. -} module Hledger.Data.Account where import Data.List import Data.Maybe import qualified Data.Map as M import Safe (headMay, lookupJustDef) import Test.HUnit import Text.Printf import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Posting() import Hledger.Data.Types import Hledger.Utils -- deriving instance Show Account instance Show Account where show Account{..} = printf "Account %s (boring:%s, postings:%d, ebalance:%s, ibalance:%s)" aname (if aboring then "y" else "n" :: String) anumpostings (showMixedAmount aebalance) (showMixedAmount aibalance) instance Eq Account where (==) a b = aname a == aname b -- quick equality test for speed -- and -- [ aname a == aname b -- -- , aparent a == aparent b -- avoid infinite recursion -- , asubs a == asubs b -- , aebalance a == aebalance b -- , aibalance a == aibalance b -- ] nullacct = Account { aname = "" , aparent = Nothing , asubs = [] , anumpostings = 0 , aebalance = nullmixedamt , aibalance = nullmixedamt , aboring = False } -- | Derive 1. an account tree and 2. each account's total exclusive -- and inclusive changes from a list of postings. -- This is the core of the balance command (and of *ledger). -- The accounts are returned as a list in flattened tree order, -- and also reference each other as a tree. -- (The first account is the root of the tree.) 
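-- Eg two postings to assets:cash plus one to expenses:food produce the account list
-- [root, assets, assets:cash, expenses, expenses:food]; assets:cash's ebalance is the
-- sum of its two postings, and root's ibalance is the sum of all three.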
accountsFromPostings :: [Posting] -> [Account] accountsFromPostings ps = let acctamts = [(paccount p,pamount p) | p <- ps] grouped = groupBy (\a b -> fst a == fst b) $ sort $ acctamts counted = [(a, length acctamts) | acctamts@((a,_):_) <- grouped] summed = map (\as@((aname,_):_) -> (aname, sumStrict $ map snd as)) grouped -- always non-empty nametree = treeFromPaths $ map (expandAccountName . fst) summed acctswithnames = nameTreeToAccount "root" nametree acctswithnumps = mapAccounts setnumps acctswithnames where setnumps a = a{anumpostings=fromMaybe 0 $ lookup (aname a) counted} acctswithebals = mapAccounts setebalance acctswithnumps where setebalance a = a{aebalance=lookupJustDef nullmixedamt (aname a) summed} acctswithibals = sumAccounts acctswithebals acctswithparents = tieAccountParents acctswithibals acctsflattened = flattenAccounts acctswithparents in acctsflattened -- | Convert an AccountName tree to an Account tree nameTreeToAccount :: AccountName -> FastTree AccountName -> Account nameTreeToAccount rootname (T m) = nullacct{ aname=rootname, asubs=map (uncurry nameTreeToAccount) $ M.assocs m } -- | Tie the knot so all subaccounts' parents are set correctly. tieAccountParents :: Account -> Account tieAccountParents = tie Nothing where tie parent a@Account{..} = a' where a' = a{aparent=parent, asubs=map (tie (Just a')) asubs} -- | Get this account's parent accounts, from the nearest up to the root. parentAccounts :: Account -> [Account] parentAccounts Account{aparent=Nothing} = [] parentAccounts Account{aparent=Just a} = a:parentAccounts a -- | List the accounts at each level of the account tree. accountsLevels :: Account -> [[Account]] accountsLevels = takeWhile (not . null) . iterate (concatMap asubs) . (:[]) -- | Map a (non-tree-structure-modifying) function over this and sub accounts. mapAccounts :: (Account -> Account) -> Account -> Account mapAccounts f a = f a{asubs = map (mapAccounts f) $ asubs a} -- | Is the predicate true on any of this account or its subaccounts ? anyAccounts :: (Account -> Bool) -> Account -> Bool anyAccounts p a | p a = True | otherwise = any (anyAccounts p) $ asubs a -- | Add subaccount-inclusive balances to an account tree. sumAccounts :: Account -> Account sumAccounts a | null $ asubs a = a{aibalance=aebalance a} | otherwise = a{aibalance=ibal, asubs=subs} where subs = map sumAccounts $ asubs a ibal = sum $ aebalance a : map aibalance subs -- | Remove all subaccounts below a certain depth. clipAccounts :: Int -> Account -> Account clipAccounts 0 a = a{asubs=[]} clipAccounts d a = a{asubs=subs} where subs = map (clipAccounts (d-1)) $ asubs a -- | Remove subaccounts below the specified depth, aggregating their balance at the depth limit -- (accounts at the depth limit will have any sub-balances merged into their exclusive balance). 
clipAccountsAndAggregate :: Int -> [Account] -> [Account] clipAccountsAndAggregate d as = combined where clipped = [a{aname=clipOrEllipsifyAccountName d $ aname a} | a <- as] combined = [a{aebalance=sum (map aebalance same)} | same@(a:_) <- groupBy (\a1 a2 -> aname a1 == aname a2) clipped] {- test cases, assuming d=1: assets:cash 1 1 assets:checking 1 1 -> as: [assets:cash 1 1, assets:checking 1 1] clipped: [assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:cash 1 1 assets:checking 1 1 -> as: [assets 0 2, assets:cash 1 1, assets:checking 1 1] clipped: [assets 0 2, assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:bank 1 2 assets:bank:checking 1 1 -> as: [assets 0 2, assets:bank 1 2, assets:bank:checking 1 1] clipped: [assets 0 2, assets 1 2, assets 1 1] combined: [assets 2 2] -} -- | Remove all leaf accounts and subtrees matching a predicate. pruneAccounts :: (Account -> Bool) -> Account -> Maybe Account pruneAccounts p = headMay . prune where prune a | null prunedsubs = if p a then [] else [a'] | otherwise = [a'] where prunedsubs = concatMap prune $ asubs a a' = a{asubs=prunedsubs} -- | Flatten an account tree into a list, which is sometimes -- convenient. Note since accounts link to their parents/subs, the -- tree's structure remains intact and can still be used. It's a tree/list! flattenAccounts :: Account -> [Account] flattenAccounts a = squish a [] where squish a as = a : Prelude.foldr squish as (asubs a) -- | Filter an account tree (to a list). filterAccounts :: (Account -> Bool) -> Account -> [Account] filterAccounts p a | p a = a : concatMap (filterAccounts p) (asubs a) | otherwise = concatMap (filterAccounts p) (asubs a) -- | Search an account list by name. lookupAccount :: AccountName -> [Account] -> Maybe Account lookupAccount a = find ((==a).aname) -- debug helpers printAccounts :: Account -> IO () printAccounts = putStrLn . showAccounts showAccounts = unlines . map showAccountDebug . flattenAccounts showAccountsBoringFlag = unlines . map (show . aboring) . flattenAccounts showAccountDebug a = printf "%-25s %4s %4s %s" (aname a) (showMixedAmount $ aebalance a) (showMixedAmount $ aibalance a) (if aboring a then "b" else " " :: String) tests_Hledger_Data_Account = TestList [ ] hledger-lib-1.2/Hledger/Data/AccountName.hs0000644000000000000000000001774413066173044016715 0ustar0000000000000000{-# LANGUAGE NoMonomorphismRestriction, OverloadedStrings #-} {-| 'AccountName's are strings like @assets:cash:petty@, with multiple components separated by ':'. From a set of these we derive the account hierarchy. -} module Hledger.Data.AccountName where import Data.List import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Tree import Test.HUnit import Text.Printf import Hledger.Data.Types import Hledger.Utils acctsepchar :: Char acctsepchar = ':' acctsep :: Text acctsep = T.pack [acctsepchar] -- accountNameComponents :: AccountName -> [String] -- accountNameComponents = splitAtElement acctsepchar accountNameComponents :: AccountName -> [Text] accountNameComponents = T.splitOn acctsep accountNameFromComponents :: [Text] -> AccountName accountNameFromComponents = T.intercalate acctsep accountLeafName :: AccountName -> Text accountLeafName = last . accountNameComponents -- | Truncate all account name components but the last to two characters. 
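-- For example:
-- >>> accountSummarisedName "assets:bank:checking"
-- "as:ba:checking"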
accountSummarisedName :: AccountName -> Text accountSummarisedName a -- length cs > 1 = take 2 (head cs) ++ ":" ++ a' | length cs > 1 = (T.intercalate ":" (map (T.take 2) $ init cs)) <> ":" <> a' | otherwise = a' where cs = accountNameComponents a a' = accountLeafName a accountNameLevel :: AccountName -> Int accountNameLevel "" = 0 accountNameLevel a = T.length (T.filter (==acctsepchar) a) + 1 accountNameDrop :: Int -> AccountName -> AccountName accountNameDrop n = accountNameFromComponents . drop n . accountNameComponents -- | ["a:b:c","d:e"] -> ["a","a:b","a:b:c","d","d:e"] expandAccountNames :: [AccountName] -> [AccountName] expandAccountNames as = nub $ concatMap expandAccountName as -- | "a:b:c" -> ["a","a:b","a:b:c"] expandAccountName :: AccountName -> [AccountName] expandAccountName = map accountNameFromComponents . tail . inits . accountNameComponents -- | ["a:b:c","d:e"] -> ["a","d"] topAccountNames :: [AccountName] -> [AccountName] topAccountNames as = [a | a <- expandAccountNames as, accountNameLevel a == 1] parentAccountName :: AccountName -> AccountName parentAccountName = accountNameFromComponents . init . accountNameComponents parentAccountNames :: AccountName -> [AccountName] parentAccountNames a = parentAccountNames' $ parentAccountName a where parentAccountNames' "" = [] parentAccountNames' a = a : parentAccountNames' (parentAccountName a) -- | Is the first account a parent or other ancestor of (and not the same as) the second ? isAccountNamePrefixOf :: AccountName -> AccountName -> Bool isAccountNamePrefixOf = T.isPrefixOf . (<> acctsep) isSubAccountNameOf :: AccountName -> AccountName -> Bool s `isSubAccountNameOf` p = (p `isAccountNamePrefixOf` s) && (accountNameLevel s == (accountNameLevel p + 1)) -- | From a list of account names, select those which are direct -- subaccounts of the given account name. subAccountNamesFrom :: [AccountName] -> AccountName -> [AccountName] subAccountNamesFrom accts a = filter (`isSubAccountNameOf` a) accts -- | Convert a list of account names to a tree. accountNameTreeFrom :: [AccountName] -> Tree AccountName accountNameTreeFrom accts = Node "root" (accounttreesfrom (topAccountNames accts)) where accounttreesfrom :: [AccountName] -> [Tree AccountName] accounttreesfrom [] = [] accounttreesfrom as = [Node a (accounttreesfrom $ subs a) | a <- as] subs = subAccountNamesFrom (expandAccountNames accts) nullaccountnametree = Node "root" [] -- | Elide an account name to fit in the specified width. -- From the ledger 2.6 news: -- -- @ -- What Ledger now does is that if an account name is too long, it will -- start abbreviating the first parts of the account name down to two -- letters in length. If this results in a string that is still too -- long, the front will be elided -- not the end. For example: -- -- Expenses:Cash ; OK, not too long -- Ex:Wednesday:Cash ; "Expenses" was abbreviated to fit -- Ex:We:Afternoon:Cash ; "Expenses" and "Wednesday" abbreviated -- ; Expenses:Wednesday:Afternoon:Lunch:Snack:Candy:Chocolate:Cash -- ..:Af:Lu:Sn:Ca:Ch:Cash ; Abbreviated and elided! 
-- @ elideAccountName :: Int -> AccountName -> AccountName elideAccountName width s -- XXX special case for transactions register's multi-account pseudo-names | " (split)" `T.isSuffixOf` s = let names = T.splitOn ", " $ T.take (T.length s - 8) s widthpername = (max 0 (width - 8 - 2 * (max 1 (length names) - 1))) `div` length names in fitText Nothing (Just width) True False $ (<>" (split)") $ T.intercalate ", " $ [accountNameFromComponents $ elideparts widthpername [] $ accountNameComponents s' | s' <- names] | otherwise = fitText Nothing (Just width) True False $ accountNameFromComponents $ elideparts width [] $ accountNameComponents s where elideparts :: Int -> [Text] -> [Text] -> [Text] elideparts width done ss | textWidth (accountNameFromComponents $ done++ss) <= width = done++ss | length ss > 1 = elideparts width (done++[textTakeWidth 2 $ head ss]) (tail ss) | otherwise = done++ss -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is 0, returns the empty string. clipAccountName :: Int -> AccountName -> AccountName clipAccountName n = accountNameFromComponents . take n . accountNameComponents -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is 0, returns "...". clipOrEllipsifyAccountName :: Int -> AccountName -> AccountName clipOrEllipsifyAccountName 0 = const "..." clipOrEllipsifyAccountName n = accountNameFromComponents . take n . accountNameComponents -- | Escape an AccountName for use within a regular expression. -- >>> putStr $ escapeName "First?!#$*?$(*) !@^#*? %)*!@#" -- First\?!#\$\*\?\$\(\*\) !@\^#\*\? %\)\*!@# escapeName :: AccountName -> Regexp escapeName = regexReplaceBy "[[?+|()*\\\\^$]" ("\\" <>) . T.unpack -- | Convert an account name to a regular expression matching it and its subaccounts. accountNameToAccountRegex :: AccountName -> Regexp accountNameToAccountRegex "" = "" accountNameToAccountRegex a = printf "^%s(:|$)" (escapeName a) -- | Convert an account name to a regular expression matching it but not its subaccounts. accountNameToAccountOnlyRegex :: AccountName -> Regexp accountNameToAccountOnlyRegex "" = "" accountNameToAccountOnlyRegex a = printf "^%s$" $ escapeName a -- XXX pack -- | Convert an exact account-matching regular expression to a plain account name. accountRegexToAccountName :: Regexp -> AccountName accountRegexToAccountName = T.pack . regexReplace "^\\^(.*?)\\(:\\|\\$\\)$" "\\1" -- XXX pack -- | Does this string look like an exact account-matching regular expression ? 
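-- For example:
-- >>> isAccountRegex "^assets(:|$)"
-- True
-- >>> isAccountRegex "assets"
-- False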
isAccountRegex :: String -> Bool isAccountRegex s = take 1 s == "^" && (take 5 $ reverse s) == ")$|:(" tests_Hledger_Data_AccountName = TestList [ "accountNameTreeFrom" ~: do accountNameTreeFrom ["a"] `is` Node "root" [Node "a" []] accountNameTreeFrom ["a","b"] `is` Node "root" [Node "a" [], Node "b" []] accountNameTreeFrom ["a","a:b"] `is` Node "root" [Node "a" [Node "a:b" []]] accountNameTreeFrom ["a:b:c"] `is` Node "root" [Node "a" [Node "a:b" [Node "a:b:c" []]]] ,"expandAccountNames" ~: expandAccountNames ["assets:cash","assets:checking","expenses:vacation"] `is` ["assets","assets:cash","assets:checking","expenses","expenses:vacation"] ,"isAccountNamePrefixOf" ~: do "assets" `isAccountNamePrefixOf` "assets" `is` False "assets" `isAccountNamePrefixOf` "assets:bank" `is` True "assets" `isAccountNamePrefixOf` "assets:bank:checking" `is` True "my assets" `isAccountNamePrefixOf` "assets:bank" `is` False ,"isSubAccountNameOf" ~: do "assets" `isSubAccountNameOf` "assets" `is` False "assets:bank" `isSubAccountNameOf` "assets" `is` True "assets:bank:checking" `isSubAccountNameOf` "assets" `is` False "assets:bank" `isSubAccountNameOf` "my assets" `is` False ] hledger-lib-1.2/Hledger/Data/Amount.hs0000644000000000000000000006336213042200120015734 0ustar0000000000000000{-| A simple 'Amount' is some quantity of money, shares, or anything else. It has a (possibly null) 'CommoditySymbol' and a numeric quantity: @ $1 £-50 EUR 3.44 GOOG 500 1.5h 90 apples 0 @ It may also have an assigned 'Price', representing this amount's per-unit or total cost in a different commodity. If present, this is rendered like so: @ EUR 2 \@ $1.50 (unit price) EUR 2 \@\@ $3 (total price) @ A 'MixedAmount' is zero or more simple amounts, so can represent multiple commodities; this is the type most often used: @ 0 $50 + EUR 3 16h + $13.55 + AAPL 500 + 6 oranges @ When a mixed amount has been \"normalised\", it has no more than one amount in each commodity and no zero amounts; or it has just a single zero amount and no others. Limited arithmetic with simple and mixed amounts is supported, best used with similar amounts since it mostly ignores assigned prices and commodity exchange rates. -} {-# LANGUAGE CPP, StandaloneDeriving, RecordWildCards, OverloadedStrings #-} module Hledger.Data.Amount ( -- * Amount amount, nullamt, missingamt, num, usd, eur, gbp, hrs, at, (@@), amountWithCommodity, -- ** arithmetic costOfAmount, divideAmount, -- ** rendering amountstyle, showAmount, showAmountWithZeroCommodity, showAmountDebug, showAmountWithoutPrice, maxprecision, maxprecisionwithpoint, setAmountPrecision, withPrecision, canonicaliseAmount, -- * MixedAmount nullmixedamt, missingmixedamt, mixed, amounts, filterMixedAmount, filterMixedAmountByCommodity, normaliseMixedAmountSquashPricesForDisplay, normaliseMixedAmount, -- ** arithmetic costOfMixedAmount, divideMixedAmount, averageMixedAmounts, isNegativeAmount, isNegativeMixedAmount, isZeroAmount, isReallyZeroAmount, isZeroMixedAmount, isReallyZeroMixedAmount, isReallyZeroMixedAmountCost, -- ** rendering showMixedAmount, showMixedAmountOneLine, showMixedAmountDebug, showMixedAmountWithoutPrice, showMixedAmountOneLineWithoutPrice, showMixedAmountWithZeroCommodity, showMixedAmountWithPrecision, setMixedAmountPrecision, canonicaliseMixedAmount, -- * misc. 
ltraceamount, tests_Hledger_Data_Amount ) where import Data.Char (isDigit) import Data.Decimal (roundTo) import Data.Function (on) import Data.List import Data.Map (findWithDefault) import Data.Maybe -- import Data.Text (Text) import qualified Data.Text as T import Test.HUnit import Text.Printf import qualified Data.Map as M import Hledger.Data.Types import Hledger.Data.Commodity import Hledger.Utils deriving instance Show MarketPrice amountstyle = AmountStyle L False 0 (Just '.') Nothing ------------------------------------------------------------------------------- -- Amount instance Show Amount where show _a@Amount{..} -- debugLevel < 2 = showAmountWithoutPrice a -- debugLevel < 3 = showAmount a | debugLevel < 6 = printf "Amount {acommodity=%s, aquantity=%s, ..}" (show acommodity) (show aquantity) | otherwise = --showAmountDebug a printf "Amount {acommodity=%s, aquantity=%s, aprice=%s, astyle=%s}" (show acommodity) (show aquantity) (showPriceDebug aprice) (show astyle) instance Num Amount where abs a@Amount{aquantity=q} = a{aquantity=abs q} signum a@Amount{aquantity=q} = a{aquantity=signum q} fromInteger i = nullamt{aquantity=fromInteger i} negate a@Amount{aquantity=q} = a{aquantity=(-q)} (+) = similarAmountsOp (+) (-) = similarAmountsOp (-) (*) = similarAmountsOp (*) -- | The empty simple amount. amount, nullamt :: Amount amount = Amount{acommodity="", aquantity=0, aprice=NoPrice, astyle=amountstyle} nullamt = amount -- | A temporary value for parsed transactions which had no amount specified. missingamt :: Amount missingamt = amount{acommodity="AUTO"} -- Handy amount constructors for tests. -- usd/eur/gbp round their argument to a whole number of pennies/cents. num n = amount{acommodity="", aquantity=n} hrs n = amount{acommodity="h", aquantity=n, astyle=amountstyle{asprecision=2, ascommodityside=R}} usd n = amount{acommodity="$", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} eur n = amount{acommodity="€", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} gbp n = amount{acommodity="£", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=2}} amt `at` priceamt = amt{aprice=UnitPrice priceamt} amt @@ priceamt = amt{aprice=TotalPrice priceamt} -- | Apply a binary arithmetic operator to two amounts, which should -- be in the same commodity if non-zero (warning, this is not checked). -- A zero result keeps the commodity of the second amount. -- The result's display style is that of the second amount, with -- precision set to the highest of either amount. -- Prices are ignored and discarded. -- Remember: the caller is responsible for ensuring both amounts have the same commodity. similarAmountsOp :: (Quantity -> Quantity -> Quantity) -> Amount -> Amount -> Amount similarAmountsOp op Amount{acommodity=_, aquantity=q1, astyle=AmountStyle{asprecision=p1}} Amount{acommodity=c2, aquantity=q2, astyle=s2@AmountStyle{asprecision=p2}} = -- trace ("a1:"++showAmountDebug a1) $ trace ("a2:"++showAmountDebug a2) $ traceWith (("= :"++).showAmountDebug) amount{acommodity=c2, aquantity=q1 `op` q2, astyle=s2{asprecision=max p1 p2}} -- c1==c2 || q1==0 || q2==0 = -- otherwise = error "tried to do simple arithmetic with amounts in different commodities" -- | Convert an amount to the specified commodity, ignoring and discarding -- any assigned prices and assuming an exchange rate of 1. amountWithCommodity :: CommoditySymbol -> Amount -> Amount amountWithCommodity c a = a{acommodity=c, aprice=NoPrice} -- | Convert an amount to the commodity of its assigned price, if any. 
Notes: -- -- - price amounts must be MixedAmounts with exactly one component Amount (or there will be a runtime error) XXX -- -- - price amounts should be positive, though this is not currently enforced costOfAmount :: Amount -> Amount costOfAmount a@Amount{aquantity=q, aprice=price} = case price of NoPrice -> a UnitPrice p@Amount{aquantity=pq} -> p{aquantity=pq * q} TotalPrice p@Amount{aquantity=pq} -> p{aquantity=pq * signum q} -- | Divide an amount's quantity by a constant. divideAmount :: Amount -> Quantity -> Amount divideAmount a@Amount{aquantity=q} d = a{aquantity=q/d} -- | Is this amount negative ? The price is ignored. isNegativeAmount :: Amount -> Bool isNegativeAmount Amount{aquantity=q} = q < 0 digits = "123456789" :: String -- | Does this amount appear to be zero when displayed with its given precision ? isZeroAmount :: Amount -> Bool isZeroAmount a -- a==missingamt = False | otherwise = (null . filter (`elem` digits) . showAmountWithoutPriceOrCommodity) a -- | Is this amount "really" zero, regardless of the display precision ? isReallyZeroAmount :: Amount -> Bool isReallyZeroAmount Amount{aquantity=q} = q == 0 -- | Get the string representation of an amount, based on its commodity's -- display settings except using the specified precision. showAmountWithPrecision :: Int -> Amount -> String showAmountWithPrecision p = showAmount . setAmountPrecision p -- | Set an amount's display precision. setAmountPrecision :: Int -> Amount -> Amount setAmountPrecision p a@Amount{astyle=s} = a{astyle=s{asprecision=p}} -- | Set an amount's display precision, flipped. withPrecision :: Amount -> Int -> Amount withPrecision = flip setAmountPrecision -- | Get a string representation of an amount for debugging, -- appropriate to the current debug level. 9 shows maximum detail. showAmountDebug :: Amount -> String showAmountDebug Amount{acommodity="AUTO"} = "(missing)" showAmountDebug Amount{..} = printf "Amount {acommodity=%s, aquantity=%s, aprice=%s, astyle=%s}" (show acommodity) (show aquantity) (showPriceDebug aprice) (show astyle) -- | Get the string representation of an amount, without any \@ price. showAmountWithoutPrice :: Amount -> String showAmountWithoutPrice a = showAmount a{aprice=NoPrice} -- | Get the string representation of an amount, without any price or commodity symbol. showAmountWithoutPriceOrCommodity :: Amount -> String showAmountWithoutPriceOrCommodity a = showAmount a{acommodity="", aprice=NoPrice} showPrice :: Price -> String showPrice NoPrice = "" showPrice (UnitPrice pa) = " @ " ++ showAmount pa showPrice (TotalPrice pa) = " @@ " ++ showAmount pa showPriceDebug :: Price -> String showPriceDebug NoPrice = "" showPriceDebug (UnitPrice pa) = " @ " ++ showAmountDebug pa showPriceDebug (TotalPrice pa) = " @@ " ++ showAmountDebug pa -- | Get the string representation of an amount, based on its -- commodity's display settings. String representations equivalent to -- zero are converted to just \"0\". The special "missing" amount is -- displayed as the empty string. 
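-- An editor-added illustration of these rules (expected output, consistent
-- with the showMixedAmount tests at the end of this module):
--
-- >>> showAmount (usd 2.5)
-- "$2.50"
-- >>> showAmount (usd 0)
-- "0"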
showAmount :: Amount -> String showAmount = showAmountHelper False showAmountHelper :: Bool -> Amount -> String showAmountHelper _ Amount{acommodity="AUTO"} = "" showAmountHelper showzerocommodity a@(Amount{acommodity=c, aprice=p, astyle=AmountStyle{..}}) = case ascommodityside of L -> printf "%s%s%s%s" (T.unpack c') space quantity' price R -> printf "%s%s%s%s" quantity' space (T.unpack c') price where quantity = showamountquantity a displayingzero = null $ filter (`elem` digits) $ quantity (quantity',c') | displayingzero && not showzerocommodity = ("0","") | otherwise = (quantity, quoteCommoditySymbolIfNeeded c) space = if (not (T.null c') && ascommodityspaced) then " " else "" :: String price = showPrice p -- | Like showAmount, but show a zero amount's commodity if it has one. showAmountWithZeroCommodity :: Amount -> String showAmountWithZeroCommodity = showAmountHelper True -- | Get the string representation of the number part of of an amount, -- using the display settings from its commodity. showamountquantity :: Amount -> String showamountquantity Amount{aquantity=q, astyle=AmountStyle{asprecision=p, asdecimalpoint=mdec, asdigitgroups=mgrps}} = punctuatenumber (fromMaybe '.' mdec) mgrps $ qstr where -- isint n = fromIntegral (round n) == n qstr -- p == maxprecision && isint q = printf "%d" (round q::Integer) | p == maxprecisionwithpoint = show q | p == maxprecision = chopdotzero $ show q | otherwise = show $ roundTo (fromIntegral p) q -- | Replace a number string's decimal point with the specified character, -- and add the specified digit group separators. The last digit group will -- be repeated as needed. punctuatenumber :: Char -> Maybe DigitGroupStyle -> String -> String punctuatenumber dec mgrps s = sign ++ reverse (applyDigitGroupStyle mgrps (reverse int)) ++ frac'' where (sign,num) = break isDigit s (int,frac) = break (=='.') num frac' = dropWhile (=='.') frac frac'' | null frac' = "" | otherwise = dec:frac' applyDigitGroupStyle :: Maybe DigitGroupStyle -> String -> String applyDigitGroupStyle Nothing s = s applyDigitGroupStyle (Just (DigitGroups c gs)) s = addseps (repeatLast gs) s where addseps [] s = s addseps (g:gs) s | length s <= g = s | otherwise = let (part,rest) = splitAt g s in part ++ [c] ++ addseps gs rest repeatLast [] = [] repeatLast gs = init gs ++ repeat (last gs) chopdotzero str = reverse $ case reverse str of '0':'.':s -> s s -> s -- | For rendering: a special precision value which means show all available digits. maxprecision :: Int maxprecision = 999998 -- | For rendering: a special precision value which forces display of a decimal point. maxprecisionwithpoint :: Int maxprecisionwithpoint = 999999 -- like journalCanonicaliseAmounts -- | Canonicalise an amount's display style using the provided commodity style map. canonicaliseAmount :: M.Map CommoditySymbol AmountStyle -> Amount -> Amount canonicaliseAmount styles a@Amount{acommodity=c, astyle=s} = a{astyle=s'} where s' = findWithDefault s c styles ------------------------------------------------------------------------------- -- MixedAmount instance Show MixedAmount where show | debugLevel < 3 = intercalate "\\n" . lines . showMixedAmountWithoutPrice -- debugLevel < 6 = intercalate "\\n" . lines . 
showMixedAmount | otherwise = showMixedAmountDebug instance Num MixedAmount where fromInteger i = Mixed [fromInteger i] negate (Mixed as) = Mixed $ map negate as (+) (Mixed as) (Mixed bs) = normaliseMixedAmount $ Mixed $ as ++ bs (*) = error' "error, mixed amounts do not support multiplication" abs = error' "error, mixed amounts do not support abs" signum = error' "error, mixed amounts do not support signum" -- | The empty mixed amount. nullmixedamt :: MixedAmount nullmixedamt = Mixed [] -- | A temporary value for parsed transactions which had no amount specified. missingmixedamt :: MixedAmount missingmixedamt = Mixed [missingamt] -- | Convert amounts in various commodities into a normalised MixedAmount. mixed :: [Amount] -> MixedAmount mixed = normaliseMixedAmount . Mixed -- | Simplify a mixed amount's component amounts: -- -- * amounts in the same commodity are combined unless they have different prices or total prices -- -- * multiple zero amounts, all with the same non-null commodity, are replaced by just the last of them, preserving the commodity and amount style (all but the last zero amount are discarded) -- -- * multiple zero amounts with multiple commodities, or no commodities, are replaced by one commodity-less zero amount -- -- * an empty amount list is replaced by one commodity-less zero amount -- -- * the special "missing" mixed amount remains unchanged -- normaliseMixedAmount :: MixedAmount -> MixedAmount normaliseMixedAmount = normaliseHelper False normaliseHelper :: Bool -> MixedAmount -> MixedAmount normaliseHelper squashprices (Mixed as) | missingamt `elem` as = missingmixedamt -- missingamt should always be alone, but detect it even if not | null nonzeros = Mixed [newzero] | otherwise = Mixed nonzeros where newzero = case filter (/= "") (map acommodity zeros) of _:_ -> last zeros _ -> nullamt (zeros, nonzeros) = partition isReallyZeroAmount $ map sumSimilarAmountsUsingFirstPrice $ groupBy groupfn $ sortBy sortfn $ as sortfn | squashprices = compare `on` acommodity | otherwise = compare `on` \a -> (acommodity a, aprice a) groupfn | squashprices = (==) `on` acommodity | otherwise = \a1 a2 -> acommodity a1 == acommodity a2 && combinableprices a1 a2 combinableprices Amount{aprice=NoPrice} Amount{aprice=NoPrice} = True combinableprices Amount{aprice=UnitPrice p1} Amount{aprice=UnitPrice p2} = p1 == p2 combinableprices _ _ = False tests_normaliseMixedAmount = [ "normaliseMixedAmount" ~: do -- assertEqual "missing amount is discarded" (Mixed [nullamt]) (normaliseMixedAmount $ Mixed [usd 0, missingamt]) assertEqual "any missing amount means a missing mixed amount" missingmixedamt (normaliseMixedAmount $ Mixed [usd 0, missingamt]) assertEqual "unpriced same-commodity amounts are combined" (Mixed [usd 2]) (normaliseMixedAmount $ Mixed [usd 0, usd 2]) -- amounts with same unit price are combined normaliseMixedAmount (Mixed [usd 1 `at` eur 1, usd 1 `at` eur 1]) `is` Mixed [usd 2 `at` eur 1] -- amounts with different unit prices are not combined normaliseMixedAmount (Mixed [usd 1 `at` eur 1, usd 1 `at` eur 2]) `is` Mixed [usd 1 `at` eur 1, usd 1 `at` eur 2] -- amounts with total prices are not combined normaliseMixedAmount (Mixed [usd 1 @@ eur 1, usd 1 @@ eur 1]) `is` Mixed [usd 1 @@ eur 1, usd 1 @@ eur 1] ] -- | Like normaliseMixedAmount, but combine each commodity's amounts -- into just one by throwing away all prices except the first. This is -- only used as a rendering helper, and could show a misleading price. 
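-- For example (an editor-added sketch; compare the normaliseMixedAmount
-- tests above): two same-commodity amounts with different unit prices,
-- which normaliseMixedAmount keeps separate, are expected to be combined
-- here under the first price:
--
--   normaliseMixedAmountSquashPricesForDisplay
--     (Mixed [usd 1 `at` eur 1, usd 1 `at` eur 2])
--     == Mixed [usd 2 `at` eur 1]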
normaliseMixedAmountSquashPricesForDisplay :: MixedAmount -> MixedAmount normaliseMixedAmountSquashPricesForDisplay = normaliseHelper True tests_normaliseMixedAmountSquashPricesForDisplay = [ "normaliseMixedAmountSquashPricesForDisplay" ~: do normaliseMixedAmountSquashPricesForDisplay (Mixed []) `is` Mixed [nullamt] assertBool "" $ isZeroMixedAmount $ normaliseMixedAmountSquashPricesForDisplay (Mixed [usd 10 ,usd 10 @@ eur 7 ,usd (-10) ,usd (-10) @@ eur 7 ]) ] -- | Sum same-commodity amounts in a lossy way, applying the first -- price to the result and discarding any other prices. Only used as a -- rendering helper. sumSimilarAmountsUsingFirstPrice :: [Amount] -> Amount sumSimilarAmountsUsingFirstPrice [] = nullamt sumSimilarAmountsUsingFirstPrice as = (sumStrict as){aprice=aprice $ head as} -- -- | Sum same-commodity amounts. If there were different prices, set -- -- the price to a special marker indicating "various". Only used as a -- -- rendering helper. -- sumSimilarAmountsNotingPriceDifference :: [Amount] -> Amount -- sumSimilarAmountsNotingPriceDifference [] = nullamt -- sumSimilarAmountsNotingPriceDifference as = undefined -- | Get a mixed amount's component amounts. amounts :: MixedAmount -> [Amount] amounts (Mixed as) = as -- | Filter a mixed amount's component amounts by a predicate. filterMixedAmount :: (Amount -> Bool) -> MixedAmount -> MixedAmount filterMixedAmount p (Mixed as) = Mixed $ filter p as -- | Return an unnormalised MixedAmount containing exactly one Amount -- with the specified commodity and the quantity of that commodity -- found in the original. NB if Amount's quantity is zero it will be -- discarded next time the MixedAmount gets normalised. filterMixedAmountByCommodity :: CommoditySymbol -> MixedAmount -> MixedAmount filterMixedAmountByCommodity c (Mixed as) = Mixed as' where as' = case filter ((==c) . acommodity) as of [] -> [nullamt{acommodity=c}] as'' -> [sum as''] -- | Convert a mixed amount's component amounts to the commodity of their -- assigned price, if any. costOfMixedAmount :: MixedAmount -> MixedAmount costOfMixedAmount (Mixed as) = Mixed $ map costOfAmount as -- | Divide a mixed amount's quantities by a constant. divideMixedAmount :: MixedAmount -> Quantity -> MixedAmount divideMixedAmount (Mixed as) d = Mixed $ map (flip divideAmount d) as -- | Calculate the average of some mixed amounts. averageMixedAmounts :: [MixedAmount] -> MixedAmount averageMixedAmounts [] = 0 averageMixedAmounts as = sum as `divideMixedAmount` fromIntegral (length as) -- | Is this mixed amount negative, if it can be normalised to a single commodity ? isNegativeMixedAmount :: MixedAmount -> Maybe Bool isNegativeMixedAmount m = case as of [a] -> Just $ isNegativeAmount a _ -> Nothing where as = amounts $ normaliseMixedAmountSquashPricesForDisplay m -- | Does this mixed amount appear to be zero when displayed with its given precision ? isZeroMixedAmount :: MixedAmount -> Bool isZeroMixedAmount = all isZeroAmount . amounts . normaliseMixedAmountSquashPricesForDisplay -- | Is this mixed amount "really" zero ? See isReallyZeroAmount. isReallyZeroMixedAmount :: MixedAmount -> Bool isReallyZeroMixedAmount = all isReallyZeroAmount . amounts . normaliseMixedAmountSquashPricesForDisplay -- | Is this mixed amount "really" zero, after converting to cost -- commodities where possible ? isReallyZeroMixedAmountCost :: MixedAmount -> Bool isReallyZeroMixedAmountCost = isReallyZeroMixedAmount . 
costOfMixedAmount -- -- | MixedAmount derived Eq instance in Types.hs doesn't know that we -- -- want $0 = EUR0 = 0. Yet we don't want to drag all this code over there. -- -- For now, use this when cross-commodity zero equality is important. -- mixedAmountEquals :: MixedAmount -> MixedAmount -> Bool -- mixedAmountEquals a b = amounts a' == amounts b' || (isZeroMixedAmount a' && isZeroMixedAmount b') -- where a' = normaliseMixedAmountSquashPricesForDisplay a -- b' = normaliseMixedAmountSquashPricesForDisplay b -- | Get the string representation of a mixed amount, after -- normalising it to one amount per commodity. Assumes amounts have -- no or similar prices, otherwise this can show misleading prices. showMixedAmount :: MixedAmount -> String showMixedAmount = showMixedAmountHelper False False -- | Like showMixedAmount, but zero amounts are shown with their -- commodity if they have one. showMixedAmountWithZeroCommodity :: MixedAmount -> String showMixedAmountWithZeroCommodity = showMixedAmountHelper True False -- | Get the one-line string representation of a mixed amount. showMixedAmountOneLine :: MixedAmount -> String showMixedAmountOneLine = showMixedAmountHelper False True showMixedAmountHelper :: Bool -> Bool -> MixedAmount -> String showMixedAmountHelper showzerocommodity useoneline m = join $ map showamt $ amounts $ normaliseMixedAmountSquashPricesForDisplay m where join | useoneline = intercalate ", " | otherwise = vConcatRightAligned showamt | showzerocommodity = showAmountWithZeroCommodity | otherwise = showAmount -- | Compact labelled trace of a mixed amount, for debugging. ltraceamount :: String -> MixedAmount -> MixedAmount ltraceamount s = traceWith (((s ++ ": ") ++).showMixedAmount) -- | Set the display precision in the amount's commodities. setMixedAmountPrecision :: Int -> MixedAmount -> MixedAmount setMixedAmountPrecision p (Mixed as) = Mixed $ map (setAmountPrecision p) as -- | Get the string representation of a mixed amount, showing each of its -- component amounts with the specified precision, ignoring their -- commoditys' display precision settings. showMixedAmountWithPrecision :: Int -> MixedAmount -> String showMixedAmountWithPrecision p m = vConcatRightAligned $ map (showAmountWithPrecision p) $ amounts $ normaliseMixedAmountSquashPricesForDisplay m -- | Get an unambiguous string representation of a mixed amount for debugging. showMixedAmountDebug :: MixedAmount -> String showMixedAmountDebug m | m == missingmixedamt = "(missing)" | otherwise = printf "Mixed [%s]" as where as = intercalate "\n " $ map showAmountDebug $ amounts m -- | Get the string representation of a mixed amount, but without -- any \@ prices. showMixedAmountWithoutPrice :: MixedAmount -> String showMixedAmountWithoutPrice m = concat $ intersperse "\n" $ map showfixedwidth as where (Mixed as) = normaliseMixedAmountSquashPricesForDisplay $ stripPrices m stripPrices (Mixed as) = Mixed $ map stripprice as where stripprice a = a{aprice=NoPrice} width = maximum $ map (length . showAmount) as showfixedwidth = printf (printf "%%%ds" width) . showAmountWithoutPrice -- | Get the one-line string representation of a mixed amount, but without -- any \@ prices. 
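-- An editor-added example (assumed output, following the display conventions
-- exercised by the tests below): the unit price on the first amount is
-- dropped and each commodity is rendered once, comma-separated:
--
-- >>> showMixedAmountOneLineWithoutPrice (Mixed [usd 1 `at` eur 2, hrs 2])
-- "$1.00, 2.00h"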
showMixedAmountOneLineWithoutPrice :: MixedAmount -> String showMixedAmountOneLineWithoutPrice m = concat $ intersperse ", " $ map showAmountWithoutPrice as where (Mixed as) = normaliseMixedAmountSquashPricesForDisplay $ stripPrices m stripPrices (Mixed as) = Mixed $ map stripprice as where stripprice a = a{aprice=NoPrice} -- | Canonicalise a mixed amount's display styles using the provided commodity style map. canonicaliseMixedAmount :: M.Map CommoditySymbol AmountStyle -> MixedAmount -> MixedAmount canonicaliseMixedAmount styles (Mixed as) = Mixed $ map (canonicaliseAmount styles) as ------------------------------------------------------------------------------- -- misc tests_Hledger_Data_Amount = TestList $ tests_normaliseMixedAmount ++ tests_normaliseMixedAmountSquashPricesForDisplay ++ [ -- Amount "costOfAmount" ~: do costOfAmount (eur 1) `is` eur 1 costOfAmount (eur 2){aprice=UnitPrice $ usd 2} `is` usd 4 costOfAmount (eur 1){aprice=TotalPrice $ usd 2} `is` usd 2 costOfAmount (eur (-1)){aprice=TotalPrice $ usd 2} `is` usd (-2) ,"isZeroAmount" ~: do assertBool "" $ isZeroAmount $ amount assertBool "" $ isZeroAmount $ usd 0 ,"negating amounts" ~: do let a = usd 1 negate a `is` a{aquantity=(-1)} let b = (usd 1){aprice=UnitPrice $ eur 2} negate b `is` b{aquantity=(-1)} ,"adding amounts without prices" ~: do let a1 = usd 1.23 let a2 = usd (-1.23) let a3 = usd (-1.23) (a1 + a2) `is` usd 0 (a1 + a3) `is` usd 0 (a2 + a3) `is` usd (-2.46) (a3 + a3) `is` usd (-2.46) sum [a1,a2,a3,-a3] `is` usd 0 -- highest precision is preserved let ap1 = usd 1 `withPrecision` 1 ap3 = usd 1 `withPrecision` 3 (asprecision $ astyle $ sum [ap1,ap3]) `is` 3 (asprecision $ astyle $ sum [ap3,ap1]) `is` 3 -- adding different commodities assumes conversion rate 1 assertBool "" $ isZeroAmount (a1 - eur 1.23) ,"showAmount" ~: do showAmount (usd 0 + gbp 0) `is` "0" -- MixedAmount ,"adding mixed amounts to zero, the commodity and amount style are preserved" ~: do (sum $ map (Mixed . (:[])) [usd 1.25 ,usd (-1) `withPrecision` 3 ,usd (-0.25) ]) `is` Mixed [usd 0 `withPrecision` 3] ,"adding mixed amounts with total prices" ~: do (sum $ map (Mixed . (:[])) [usd 1 @@ eur 1 ,usd (-2) @@ eur 1 ]) `is` (Mixed [usd 1 @@ eur 1 ,usd (-2) @@ eur 1 ]) ,"showMixedAmount" ~: do showMixedAmount (Mixed [usd 1]) `is` "$1.00" showMixedAmount (Mixed [usd 1 `at` eur 2]) `is` "$1.00 @ €2.00" showMixedAmount (Mixed [usd 0]) `is` "0" showMixedAmount (Mixed []) `is` "0" showMixedAmount missingmixedamt `is` "" ,"showMixedAmountWithoutPrice" ~: do let a = usd 1 `at` eur 2 showMixedAmountWithoutPrice (Mixed [a]) `is` "$1.00" showMixedAmountWithoutPrice (Mixed [a, (-a)]) `is` "0" ] hledger-lib-1.2/Hledger/Data/AutoTransaction.hs0000644000000000000000000001325013066173044017622 0ustar0000000000000000{-# LANGUAGE OverloadedStrings, ViewPatterns #-} {-| This module provides utilities for applying automated transactions like 'ModifierTransaction' and 'PeriodicTransaction'. 
-} module Hledger.Data.AutoTransaction ( -- * Transaction processors runModifierTransaction , runPeriodicTransaction -- * Accessors , mtvaluequery , jdatespan ) where import Data.Maybe import Data.Monoid ((<>)) import Data.Time.Calendar import qualified Data.Text as T import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Transaction import Hledger.Utils.Parse import Hledger.Utils.UTF8IOCompat (error') import Hledger.Query -- $setup -- >>> :set -XOverloadedStrings -- >>> import Hledger.Data.Posting -- >>> import Hledger.Data.Journal -- | Builds a 'Transaction' transformer based on 'ModifierTransaction'. -- -- 'Query' parameter allows injection of additional restriction on posting -- match. Don't forget to call 'txnTieKnot'. -- -- >>> runModifierTransaction Any (ModifierTransaction "" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- pong $2.00 -- -- -- >>> runModifierTransaction Any (ModifierTransaction "miss" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- -- -- >>> runModifierTransaction None (ModifierTransaction "" ["pong" `post` usd 2]) nulltransaction{tpostings=["ping" `post` usd 1]} -- 0000/01/01 -- ping $1.00 -- -- -- >>> runModifierTransaction Any (ModifierTransaction "ping" ["pong" `post` amount{acommodity="*", aquantity=3}]) nulltransaction{tpostings=["ping" `post` usd 2]} -- 0000/01/01 -- ping $2.00 -- pong $6.00 -- -- runModifierTransaction :: Query -> ModifierTransaction -> (Transaction -> Transaction) runModifierTransaction q mt = modifier where q' = simplifyQuery $ And [q, mtvaluequery mt (error "query cannot depend on current time")] mods = map runModifierPosting $ mtpostings mt generatePostings ps = [m p | p <- ps, q' `matchesPosting` p, m <- mods] modifier t@(tpostings -> ps) = t { tpostings = ps ++ generatePostings ps } -- | Extract 'Query' equivalent of 'mtvalueexpr' from 'ModifierTransaction' -- -- >>> mtvaluequery (ModifierTransaction "" []) undefined -- Any -- >>> mtvaluequery (ModifierTransaction "ping" []) undefined -- Acct "ping" -- >>> mtvaluequery (ModifierTransaction "date:2016" []) undefined -- Date (DateSpan 2016) -- >>> mtvaluequery (ModifierTransaction "date:today" []) (read "2017-01-01") -- Date (DateSpan 2017/01/01) mtvaluequery :: ModifierTransaction -> (Day -> Query) mtvaluequery mt = fst . 
flip parseQuery (mtvalueexpr mt) -- | 'DateSpan' of all dates mentioned in 'Journal' -- -- >>> jdatespan nulljournal -- DateSpan - -- >>> jdatespan nulljournal{jtxns=[nulltransaction{tdate=read "2016-01-01"}] } -- DateSpan 2016/01/01 -- >>> jdatespan nulljournal{jtxns=[nulltransaction{tdate=read "2016-01-01", tpostings=[nullposting{pdate=Just $ read "2016-02-01"}]}] } -- DateSpan 2016/01/01-2016/02/01 jdatespan :: Journal -> DateSpan jdatespan j | null dates = nulldatespan | otherwise = DateSpan (Just $ minimum dates) (Just $ 1 `addDays` maximum dates) where dates = concatMap tdates $ jtxns j -- | 'DateSpan' of all dates mentioned in 'Transaction' -- -- >>> tdates nulltransaction -- [0000-01-01] tdates :: Transaction -> [Day] tdates t = tdate t : concatMap pdates (tpostings t) ++ maybeToList (tdate2 t) where pdates p = catMaybes [pdate p, pdate2 p] postingScale :: Posting -> Maybe Quantity postingScale p = case amounts $ pamount p of [a] | acommodity a == "*" -> Just $ aquantity a _ -> Nothing runModifierPosting :: Posting -> (Posting -> Posting) runModifierPosting p' = modifier where modifier p = renderPostingCommentDates $ p' { pdate = pdate p , pdate2 = pdate2 p , pamount = amount' p } amount' = case postingScale p' of Nothing -> const $ pamount p' Just n -> \p -> pamount p `divideMixedAmount` (1/n) renderPostingCommentDates :: Posting -> Posting renderPostingCommentDates p = p { pcomment = comment' } where datesComment = T.concat $ catMaybes [T.pack . showDate <$> pdate p, ("=" <>) . T.pack . showDate <$> pdate2 p] comment' | T.null datesComment = pcomment p | otherwise = T.intercalate "\n" $ filter (not . T.null) [T.strip $ pcomment p, "[" <> datesComment <> "]"] -- | Generate transactions from 'PeriodicTransaction' within a 'DateSpan' -- -- Note that new transactions require 'txnTieKnot' post-processing. -- -- >>> mapM_ (putStr . show) $ runPeriodicTransaction (PeriodicTransaction "monthly from 2017/1 to 2017/4" ["hi" `post` usd 1]) nulldatespan -- 2017/01/01 -- hi $1.00 -- -- 2017/02/01 -- hi $1.00 -- -- 2017/03/01 -- hi $1.00 -- runPeriodicTransaction :: PeriodicTransaction -> (DateSpan -> [Transaction]) runPeriodicTransaction pt = generate where base = nulltransaction { tpostings = ptpostings pt } periodExpr = ptperiodicexpr pt errCurrent = error' $ "Current date cannot be referenced in " ++ show (T.unpack periodExpr) (interval, effectspan) = case parsePeriodExpr errCurrent periodExpr of Left e -> error' $ "Failed to parse " ++ show (T.unpack periodExpr) ++ ": " ++ showDateParseError e Right x -> x generate jspan = [base {tdate=date} | span <- interval `splitSpan` spanIntersect effectspan jspan, let Just date = spanStart span] hledger-lib-1.2/Hledger/Data/Commodity.hs0000644000000000000000000000440013035210046016433 0ustar0000000000000000{-| A 'Commodity' is a symbol representing a currency or some other kind of thing we are tracking, and some display preferences that tell how to display 'Amount's of the commodity - is the symbol on the left or right, are thousands separated by comma, significant decimal places and so on. 
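An illustrative note added by the editor (not part of the original text):
symbols containing spaces, digits or the other characters listed in
nonsimplecommoditychars below are wrapped in double quotes when rendered,
roughly:

@
quoteCommoditySymbolIfNeeded "US dollars" == "\"US dollars\""
quoteCommoditySymbolIfNeeded "$"          == "$"
@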
-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Commodity where import Data.List import Data.Maybe (fromMaybe) import Data.Monoid -- import Data.Text (Text) import qualified Data.Text as T import Test.HUnit -- import qualified Data.Map as M import Hledger.Data.Types import Hledger.Utils -- characters that may not be used in a non-quoted commodity symbol nonsimplecommoditychars = "0123456789-+.@;\n \"{}=" :: [Char] quoteCommoditySymbolIfNeeded s | any (`elem` nonsimplecommoditychars) (T.unpack s) = "\"" <> s <> "\"" | otherwise = s commodity = "" -- handy constructors for tests -- unknown = commodity -- usd = "$" -- eur = "€" -- gbp = "£" -- hour = "h" -- Some sample commodity' names and symbols, for use in tests.. commoditysymbols = [("unknown","") ,("usd","$") ,("eur","€") ,("gbp","£") ,("hour","h") ] -- | Look up one of the sample commodities' symbol by name. comm :: String -> CommoditySymbol comm name = snd $ fromMaybe (error' "commodity lookup failed") (find (\n -> fst n == name) commoditysymbols) -- | Find the conversion rate between two commodities. Currently returns 1. conversionRate :: CommoditySymbol -> CommoditySymbol -> Double conversionRate _ _ = 1 -- -- | Convert a list of commodities to a map from commodity symbols to -- -- unique, display-preference-canonicalised commodities. -- canonicaliseCommodities :: [CommoditySymbol] -> Map.Map String CommoditySymbol -- canonicaliseCommodities cs = -- Map.fromList [(s,firstc{precision=maxp}) | s <- symbols, -- let cs = commoditymap ! s, -- let firstc = head cs, -- let maxp = maximum $ map precision cs -- ] -- where -- commoditymap = Map.fromList [(s, commoditieswithsymbol s) | s <- symbols] -- commoditieswithsymbol s = filter ((s==) . symbol) cs -- symbols = nub $ map symbol cs tests_Hledger_Data_Commodity = TestList [ ] hledger-lib-1.2/Hledger/Data/Dates.hs0000644000000000000000000007042613035510426015547 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE NoMonoLocalBinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-| Date parsing and utilities for hledger. For date and time values, we use the standard Day and UTCTime types. A 'SmartDate' is a date which may be partially-specified or relative. Eg 2008\/12\/31, but also 2008\/12, 12\/31, tomorrow, last week, next year. We represent these as a triple of strings like (\"2008\",\"12\",\"\"), (\"\",\"\",\"tomorrow\"), (\"\",\"last\",\"week\"). A 'DateSpan' is the span of time between two specific calendar dates, or an open-ended span where one or both dates are unspecified. (A date span with both ends unspecified matches all dates.) An 'Interval' is ledger's \"reporting interval\" - weekly, monthly, quarterly, etc. 'Period' will probably replace DateSpan in due course. -} -- XXX fromGregorian silently clips bad dates, use fromGregorianValid instead ? 
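-- For illustration (editor's note, using the standard Data.Time.Calendar API):
--
-- >>> fromGregorian 2008 2 30
-- 2008-02-29
-- >>> fromGregorianValid 2008 2 30
-- Nothing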
module Hledger.Data.Dates ( -- * Misc date handling utilities getCurrentDay, getCurrentMonth, getCurrentYear, nulldate, spanContainsDate, periodContainsDate, parsedateM, parsedate, showDate, showDateSpan, elapsedSeconds, prevday, parsePeriodExpr, nulldatespan, failIfInvalidYear, failIfInvalidMonth, failIfInvalidDay, datesepchar, datesepchars, spanStart, spanEnd, spansSpan, spanIntersect, spansIntersect, spanDefaultsFrom, spanUnion, spansUnion, smartdate, splitSpan, fixSmartDate, fixSmartDateStr, fixSmartDateStrEither, fixSmartDateStrEither', daysInSpan, maybePeriod, mkdatespan, ) where import Prelude () import Prelude.Compat import Control.Monad import Data.List.Compat import Data.Maybe import Data.Text (Text) import qualified Data.Text as T #if MIN_VERSION_time(1,5,0) import Data.Time.Format hiding (months) #else import Data.Time.Format import System.Locale (TimeLocale, defaultTimeLocale) #endif import Data.Time.Calendar import Data.Time.Calendar.OrdinalDate import Data.Time.Clock import Data.Time.LocalTime import Safe (headMay, lastMay, readMay) import Text.Megaparsec import Text.Megaparsec.Text import Text.Printf import Hledger.Data.Types import Hledger.Data.Period import Hledger.Utils -- Help ppShow parse and line-wrap DateSpans better in debug output. instance Show DateSpan where show s = "DateSpan " ++ showDateSpan s -- show s = "DateSpan \"" ++ showDateSpan s ++ "\"" -- quotes to help pretty-show showDate :: Day -> String showDate = formatTime defaultTimeLocale "%0C%y/%m/%d" -- | Render a datespan as a display string, abbreviating into a -- compact form if possible. showDateSpan :: DateSpan -> String showDateSpan = showPeriod . dateSpanAsPeriod -- | Get the current local date. getCurrentDay :: IO Day getCurrentDay = do t <- getZonedTime return $ localDay (zonedTimeToLocalTime t) -- | Get the current local month number. getCurrentMonth :: IO Int getCurrentMonth = do (_,m,_) <- toGregorian `fmap` getCurrentDay return m -- | Get the current local year. getCurrentYear :: IO Integer getCurrentYear = do (y,_,_) <- toGregorian `fmap` getCurrentDay return y elapsedSeconds :: Fractional a => UTCTime -> UTCTime -> a elapsedSeconds t1 = realToFrac . diffUTCTime t1 spanStart :: DateSpan -> Maybe Day spanStart (DateSpan d _) = d spanEnd :: DateSpan -> Maybe Day spanEnd (DateSpan _ d) = d -- might be useful later: http://en.wikipedia.org/wiki/Allen%27s_interval_algebra -- | Get overall span enclosing multiple sequentially ordered spans. spansSpan :: [DateSpan] -> DateSpan spansSpan spans = DateSpan (maybe Nothing spanStart $ headMay spans) (maybe Nothing spanEnd $ lastMay spans) -- | Split a DateSpan into one or more consecutive whole spans of the specified length which enclose it. -- If no interval is specified, the original span is returned. 
-- -- ==== Examples: -- >>> let t i d1 d2 = splitSpan i $ mkdatespan d1 d2 -- >>> t NoInterval "2008/01/01" "2009/01/01" -- [DateSpan 2008] -- >>> t (Quarters 1) "2008/01/01" "2009/01/01" -- [DateSpan 2008q1,DateSpan 2008q2,DateSpan 2008q3,DateSpan 2008q4] -- >>> splitSpan (Quarters 1) nulldatespan -- [DateSpan -] -- >>> t (Days 1) "2008/01/01" "2008/01/01" -- an empty datespan -- [DateSpan 2008/01/01-2007/12/31] -- >>> t (Quarters 1) "2008/01/01" "2008/01/01" -- [DateSpan 2008/01/01-2007/12/31] -- >>> t (Months 1) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01,DateSpan 2008/02,DateSpan 2008/03] -- >>> t (Months 2) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01/01-2008/02/29,DateSpan 2008/03/01-2008/04/30] -- >>> t (Weeks 1) "2008/01/01" "2008/01/15" -- [DateSpan 2007/12/31w01,DateSpan 2008/01/07w02,DateSpan 2008/01/14w03] -- >>> t (Weeks 2) "2008/01/01" "2008/01/15" -- [DateSpan 2007/12/31-2008/01/13,DateSpan 2008/01/14-2008/01/27] -- >>> t (DayOfMonth 2) "2008/01/01" "2008/04/01" -- [DateSpan 2008/01/02-2008/02/01,DateSpan 2008/02/02-2008/03/01,DateSpan 2008/03/02-2008/04/01] -- >>> t (DayOfWeek 2) "2011/01/01" "2011/01/15" -- [DateSpan 2011/01/04-2011/01/10,DateSpan 2011/01/11-2011/01/17] -- splitSpan :: Interval -> DateSpan -> [DateSpan] splitSpan _ (DateSpan Nothing Nothing) = [DateSpan Nothing Nothing] splitSpan NoInterval s = [s] splitSpan (Days n) s = splitspan startofday (applyN n nextday) s splitSpan (Weeks n) s = splitspan startofweek (applyN n nextweek) s splitSpan (Months n) s = splitspan startofmonth (applyN n nextmonth) s splitSpan (Quarters n) s = splitspan startofquarter (applyN n nextquarter) s splitSpan (Years n) s = splitspan startofyear (applyN n nextyear) s splitSpan (DayOfMonth n) s = splitspan (nthdayofmonthcontaining n) (applyN (n-1) nextday . nextmonth) s splitSpan (DayOfWeek n) s = splitspan (nthdayofweekcontaining n) (applyN (n-1) nextday . nextweek) s -- splitSpan (WeekOfYear n) s = splitspan startofweek (applyN n nextweek) s -- splitSpan (MonthOfYear n) s = splitspan startofmonth (applyN n nextmonth) s -- splitSpan (QuarterOfYear n) s = splitspan startofquarter (applyN n nextquarter) s -- Split the given span using the provided helper functions: -- start is applied to the span's start date to get the first sub-span's start date -- next is applied to a sub-span's start date to get the next sub-span's start date splitspan :: (Day -> Day) -> (Day -> Day) -> DateSpan -> [DateSpan] splitspan _ _ (DateSpan Nothing Nothing) = [] splitspan start next (DateSpan Nothing (Just e)) = splitspan start next (DateSpan (Just $ start e) (Just $ next $ start e)) splitspan start next (DateSpan (Just s) Nothing) = splitspan start next (DateSpan (Just $ start s) (Just $ next $ start s)) splitspan start next span@(DateSpan (Just s) (Just e)) | s == e = [span] | otherwise = splitspan' start next span where splitspan' start next (DateSpan (Just s) (Just e)) | s >= e = [] | otherwise = DateSpan (Just subs) (Just sube) : splitspan' start next (DateSpan (Just sube) (Just e)) where subs = start s sube = next subs splitspan' _ _ _ = error' "won't happen, avoids warnings" -- | Count the days in a DateSpan, or if it is open-ended return Nothing. daysInSpan :: DateSpan -> Maybe Integer daysInSpan (DateSpan (Just d1) (Just d2)) = Just $ diffDays d2 d1 daysInSpan _ = Nothing -- | Does the span include the given date ? 
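-- Note the end date is exclusive. An editor-added sketch, using mkdatespan
-- and parsedate from this module:
--
-- >>> mkdatespan "2008/01/01" "2009/01/01" `spanContainsDate` parsedate "2008/06/15"
-- True
-- >>> mkdatespan "2008/01/01" "2009/01/01" `spanContainsDate` parsedate "2009/01/01"
-- False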
spanContainsDate :: DateSpan -> Day -> Bool spanContainsDate (DateSpan Nothing Nothing) _ = True spanContainsDate (DateSpan Nothing (Just e)) d = d < e spanContainsDate (DateSpan (Just b) Nothing) d = d >= b spanContainsDate (DateSpan (Just b) (Just e)) d = d >= b && d < e -- | Does the period include the given date ? -- (Here to avoid import cycle). periodContainsDate :: Period -> Day -> Bool periodContainsDate p = spanContainsDate (periodAsDateSpan p) -- | Calculate the intersection of a number of datespans. spansIntersect [] = nulldatespan spansIntersect [d] = d spansIntersect (d:ds) = d `spanIntersect` (spansIntersect ds) -- | Calculate the intersection of two datespans. spanIntersect (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = latest b1 b2 e = earliest e1 e2 -- | Fill any unspecified dates in the first span with the dates from -- the second one. Sort of a one-way spanIntersect. spanDefaultsFrom (DateSpan a1 b1) (DateSpan a2 b2) = DateSpan a b where a = if isJust a1 then a1 else a2 b = if isJust b1 then b1 else b2 -- | Calculate the union of a number of datespans. spansUnion [] = nulldatespan spansUnion [d] = d spansUnion (d:ds) = d `spanUnion` (spansUnion ds) -- | Calculate the union of two datespans. spanUnion (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = earliest b1 b2 e = latest e1 e2 latest d Nothing = d latest Nothing d = d latest (Just d1) (Just d2) = Just $ max d1 d2 earliest d Nothing = d earliest Nothing d = d earliest (Just d1) (Just d2) = Just $ min d1 d2 -- | Parse a period expression to an Interval and overall DateSpan using -- the provided reference date, or return a parse error. parsePeriodExpr :: Day -> Text -> Either (ParseError Char Dec) (Interval, DateSpan) parsePeriodExpr refdate = parsewith (periodexpr refdate <* eof) maybePeriod :: Day -> Text -> Maybe (Interval,DateSpan) maybePeriod refdate = either (const Nothing) Just . parsePeriodExpr refdate -- | Show a DateSpan as a human-readable pseudo-period-expression string. -- dateSpanAsText :: DateSpan -> String -- dateSpanAsText (DateSpan Nothing Nothing) = "all" -- dateSpanAsText (DateSpan Nothing (Just e)) = printf "to %s" (show e) -- dateSpanAsText (DateSpan (Just b) Nothing) = printf "from %s" (show b) -- dateSpanAsText (DateSpan (Just b) (Just e)) = printf "%s to %s" (show b) (show e) -- | Convert a single smart date string to a date span using the provided -- reference date, or raise an error. 
-- spanFromSmartDateString :: Day -> String -> DateSpan -- spanFromSmartDateString refdate s = spanFromSmartDate refdate sdate -- where -- sdate = fromparse $ parsewith smartdateonly s spanFromSmartDate :: Day -> SmartDate -> DateSpan spanFromSmartDate refdate sdate = DateSpan (Just b) (Just e) where (ry,rm,_) = toGregorian refdate (b,e) = span sdate span :: SmartDate -> (Day,Day) span ("","","today") = (refdate, nextday refdate) span ("","this","day") = (refdate, nextday refdate) span ("","","yesterday") = (prevday refdate, refdate) span ("","last","day") = (prevday refdate, refdate) span ("","","tomorrow") = (nextday refdate, addDays 2 refdate) span ("","next","day") = (nextday refdate, addDays 2 refdate) span ("","last","week") = (prevweek refdate, thisweek refdate) span ("","this","week") = (thisweek refdate, nextweek refdate) span ("","next","week") = (nextweek refdate, startofweek $ addDays 14 refdate) span ("","last","month") = (prevmonth refdate, thismonth refdate) span ("","this","month") = (thismonth refdate, nextmonth refdate) span ("","next","month") = (nextmonth refdate, startofmonth $ addGregorianMonthsClip 2 refdate) span ("","last","quarter") = (prevquarter refdate, thisquarter refdate) span ("","this","quarter") = (thisquarter refdate, nextquarter refdate) span ("","next","quarter") = (nextquarter refdate, startofquarter $ addGregorianMonthsClip 6 refdate) span ("","last","year") = (prevyear refdate, thisyear refdate) span ("","this","year") = (thisyear refdate, nextyear refdate) span ("","next","year") = (nextyear refdate, startofyear $ addGregorianYearsClip 2 refdate) span ("","",d) = (day, nextday day) where day = fromGregorian ry rm (read d) span ("",m,"") = (startofmonth day, nextmonth day) where day = fromGregorian ry (read m) 1 span ("",m,d) = (day, nextday day) where day = fromGregorian ry (read m) (read d) span (y,"","") = (startofyear day, nextyear day) where day = fromGregorian (read y) 1 1 span (y,m,"") = (startofmonth day, nextmonth day) where day = fromGregorian (read y) (read m) 1 span (y,m,d) = (day, nextday day) where day = fromGregorian (read y) (read m) (read d) -- showDay :: Day -> String -- showDay day = printf "%04d/%02d/%02d" y m d where (y,m,d) = toGregorian day -- | Convert a smart date string to an explicit yyyy\/mm\/dd string using -- the provided reference date, or raise an error. fixSmartDateStr :: Day -> Text -> String fixSmartDateStr d s = either (\e->error' $ printf "could not parse date %s %s" (show s) (show e)) id $ (fixSmartDateStrEither d s :: Either (ParseError Char Dec) String) -- | A safe version of fixSmartDateStr. fixSmartDateStrEither :: Day -> Text -> Either (ParseError Char Dec) String fixSmartDateStrEither d = either Left (Right . showDate) . fixSmartDateStrEither' d fixSmartDateStrEither' :: Day -> Text -> Either (ParseError Char Dec) Day fixSmartDateStrEither' d s = case parsewith smartdateonly (T.toLower s) of Right sd -> Right $ fixSmartDate d sd Left e -> Left e -- | Convert a SmartDate to an absolute date using the provided reference date. 
-- -- ==== Examples: -- >>> :set -XOverloadedStrings -- >>> let t = fixSmartDateStr (parsedate "2008/11/26") -- >>> t "0000-01-01" -- "0000/01/01" -- >>> t "1999-12-02" -- "1999/12/02" -- >>> t "1999.12.02" -- "1999/12/02" -- >>> t "1999/3/2" -- "1999/03/02" -- >>> t "19990302" -- "1999/03/02" -- >>> t "2008/2" -- "2008/02/01" -- >>> t "0020/2" -- "0020/02/01" -- >>> t "1000" -- "1000/01/01" -- >>> t "4/2" -- "2008/04/02" -- >>> t "2" -- "2008/11/02" -- >>> t "January" -- "2008/01/01" -- >>> t "feb" -- "2008/02/01" -- >>> t "today" -- "2008/11/26" -- >>> t "yesterday" -- "2008/11/25" -- >>> t "tomorrow" -- "2008/11/27" -- >>> t "this day" -- "2008/11/26" -- >>> t "last day" -- "2008/11/25" -- >>> t "next day" -- "2008/11/27" -- >>> t "this week" -- last monday -- "2008/11/24" -- >>> t "last week" -- previous monday -- "2008/11/17" -- >>> t "next week" -- next monday -- "2008/12/01" -- >>> t "this month" -- "2008/11/01" -- >>> t "last month" -- "2008/10/01" -- >>> t "next month" -- "2008/12/01" -- >>> t "this quarter" -- "2008/10/01" -- >>> t "last quarter" -- "2008/07/01" -- >>> t "next quarter" -- "2009/01/01" -- >>> t "this year" -- "2008/01/01" -- >>> t "last year" -- "2007/01/01" -- >>> t "next year" -- "2009/01/01" -- -- t "last wed" -- "2008/11/19" -- t "next friday" -- "2008/11/28" -- t "next january" -- "2009/01/01" -- fixSmartDate :: Day -> SmartDate -> Day fixSmartDate refdate sdate = fix sdate where fix :: SmartDate -> Day fix ("","","today") = fromGregorian ry rm rd fix ("","this","day") = fromGregorian ry rm rd fix ("","","yesterday") = prevday refdate fix ("","last","day") = prevday refdate fix ("","","tomorrow") = nextday refdate fix ("","next","day") = nextday refdate fix ("","last","week") = prevweek refdate fix ("","this","week") = thisweek refdate fix ("","next","week") = nextweek refdate fix ("","last","month") = prevmonth refdate fix ("","this","month") = thismonth refdate fix ("","next","month") = nextmonth refdate fix ("","last","quarter") = prevquarter refdate fix ("","this","quarter") = thisquarter refdate fix ("","next","quarter") = nextquarter refdate fix ("","last","year") = prevyear refdate fix ("","this","year") = thisyear refdate fix ("","next","year") = nextyear refdate fix ("","",d) = fromGregorian ry rm (read d) fix ("",m,"") = fromGregorian ry (read m) 1 fix ("",m,d) = fromGregorian ry (read m) (read d) fix (y,"","") = fromGregorian (read y) 1 1 fix (y,m,"") = fromGregorian (read y) (read m) 1 fix (y,m,d) = fromGregorian (read y) (read m) (read d) (ry,rm,rd) = toGregorian refdate prevday :: Day -> Day prevday = addDays (-1) nextday = addDays 1 startofday = id thisweek = startofweek prevweek = startofweek . addDays (-7) nextweek = startofweek . addDays 7 startofweek day = fromMondayStartWeek y w 1 where (y,_,_) = toGregorian day (w,_) = mondayStartWeek day thismonth = startofmonth prevmonth = startofmonth . addGregorianMonthsClip (-1) nextmonth = startofmonth . addGregorianMonthsClip 1 startofmonth day = fromGregorian y m 1 where (y,m,_) = toGregorian day thisquarter = startofquarter prevquarter = startofquarter . addGregorianMonthsClip (-3) nextquarter = startofquarter . addGregorianMonthsClip 3 startofquarter day = fromGregorian y (firstmonthofquarter m) 1 where (y,m,_) = toGregorian day firstmonthofquarter m = ((m-1) `div` 3) * 3 + 1 thisyear = startofyear prevyear = startofyear . addGregorianYearsClip (-1) nextyear = startofyear . 
addGregorianYearsClip 1 startofyear day = fromGregorian y 1 1 where (y,_,_) = toGregorian day nthdayofmonthcontaining n d | d1 >= d = d1 | otherwise = d2 where d1 = addDays (fromIntegral n-1) s d2 = addDays (fromIntegral n-1) $ nextmonth s s = startofmonth d nthdayofweekcontaining n d | d1 >= d = d1 | otherwise = d2 where d1 = addDays (fromIntegral n-1) s d2 = addDays (fromIntegral n-1) $ nextweek s s = startofweek d ---------------------------------------------------------------------- -- parsing -- -- | Parse a couple of date-time string formats to a time type. -- parsedatetimeM :: String -> Maybe LocalTime -- parsedatetimeM s = firstJust [ -- parseTime defaultTimeLocale "%Y/%m/%d %H:%M:%S" s, -- parseTime defaultTimeLocale "%Y-%m-%d %H:%M:%S" s -- ] parsetime :: ParseTime t => TimeLocale -> String -> String -> Maybe t parsetime = #if MIN_VERSION_time(1,5,0) parseTimeM True #else parseTime #endif -- | Parse a couple of date string formats to a time type. parsedateM :: String -> Maybe Day parsedateM s = firstJust [ parsetime defaultTimeLocale "%Y/%m/%d" s, parsetime defaultTimeLocale "%Y-%m-%d" s ] -- -- | Parse a date-time string to a time type, or raise an error. -- parsedatetime :: String -> LocalTime -- parsedatetime s = fromMaybe (error' $ "could not parse timestamp \"" ++ s ++ "\"") -- (parsedatetimeM s) -- | Parse a YYYY-MM-DD or YYYY/MM/DD date string to a Day, or raise an error. For testing/debugging. -- -- >>> parsedate "2008/02/03" -- 2008-02-03 parsedate :: String -> Day parsedate s = fromMaybe (error' $ "could not parse date \"" ++ s ++ "\"") (parsedateM s) -- doctests I haven't been able to make compatible with both GHC 7 and 8 -- -- >>> parsedate "2008/02/03/" -- -- *** Exception: could not parse date "2008/02/03/" -- #if MIN_VERSION_base(4,9,0) -- -- ... -- #endif -- #if MIN_VERSION_time(1,6,0) -- -- >>> parsedate "2008/02/30" -- with time >= 1.6, invalid dates are rejected -- -- *** Exception: could not parse date "2008/02/30" -- #if MIN_VERSION_base(4,9,0) -- -- ... -- #endif -- #else -- -- >>> parsedate "2008/02/30" -- with time < 1.6, they are silently adjusted -- -- 2008-02-29 -- #endif -- | Parse a time string to a time type using the provided pattern, or -- return the default. _parsetimewith :: ParseTime t => String -> String -> t -> t _parsetimewith pat s def = fromMaybe def $ parsetime defaultTimeLocale pat s {-| Parse a date in any of the formats allowed in ledger's period expressions, and maybe some others: > 2004 > 2004/10 > 2004/10/1 > 10/1 > 21 > october, oct > yesterday, today, tomorrow > this/next/last week/day/month/quarter/year Returns a SmartDate, to be converted to a full date later (see fixSmartDate). Assumes any text in the parse stream has been lowercased. -} smartdate :: Parser SmartDate smartdate = do -- XXX maybe obscures date errors ? see ledgerdate (y,m,d) <- choice' [yyyymmdd, ymd, ym, md, y, d, month, mon, today, yesterday, tomorrow, lastthisnextthing] return (y,m,d) -- | Like smartdate, but there must be nothing other than whitespace after the date. smartdateonly :: Parser SmartDate smartdateonly = do d <- smartdate many spacenonewline eof return d datesepchars :: [Char] datesepchars = "/-." 
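-- An editor-added clarification: the smart date parsers above accept any of
-- these separators (see the fixSmartDateStr examples), while parsedateM,
-- defined earlier in this module, accepts only the slash and dash forms:
--
-- >>> parsedateM "2008.02.03"
-- Nothing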
datesepchar :: TextParser m Char datesepchar = oneOf datesepchars validYear, validMonth, validDay :: String -> Bool validYear s = length s >= 4 && isJust (readMay s :: Maybe Year) validMonth s = maybe False (\n -> n>=1 && n<=12) $ readMay s validDay s = maybe False (\n -> n>=1 && n<=31) $ readMay s failIfInvalidYear, failIfInvalidMonth, failIfInvalidDay :: (Monad m) => String -> m () failIfInvalidYear s = unless (validYear s) $ fail $ "bad year number: " ++ s failIfInvalidMonth s = unless (validMonth s) $ fail $ "bad month number: " ++ s failIfInvalidDay s = unless (validDay s) $ fail $ "bad day number: " ++ s yyyymmdd :: Parser SmartDate yyyymmdd = do y <- count 4 digitChar m <- count 2 digitChar failIfInvalidMonth m d <- count 2 digitChar failIfInvalidDay d return (y,m,d) ymd :: Parser SmartDate ymd = do y <- some digitChar failIfInvalidYear y sep <- datesepchar m <- some digitChar failIfInvalidMonth m char sep d <- some digitChar failIfInvalidDay d return $ (y,m,d) ym :: Parser SmartDate ym = do y <- some digitChar failIfInvalidYear y datesepchar m <- some digitChar failIfInvalidMonth m return (y,m,"") y :: Parser SmartDate y = do y <- some digitChar failIfInvalidYear y return (y,"","") d :: Parser SmartDate d = do d <- some digitChar failIfInvalidDay d return ("","",d) md :: Parser SmartDate md = do m <- some digitChar failIfInvalidMonth m datesepchar d <- some digitChar failIfInvalidDay d return ("",m,d) months = ["january","february","march","april","may","june", "july","august","september","october","november","december"] monthabbrevs = ["jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"] -- weekdays = ["monday","tuesday","wednesday","thursday","friday","saturday","sunday"] -- weekdayabbrevs = ["mon","tue","wed","thu","fri","sat","sun"] monthIndex s = maybe 0 (+1) $ lowercase s `elemIndex` months monIndex s = maybe 0 (+1) $ lowercase s `elemIndex` monthabbrevs month :: Parser SmartDate month = do m <- choice $ map (try . string) months let i = monthIndex m return ("",show i,"") mon :: Parser SmartDate mon = do m <- choice $ map (try . 
string) monthabbrevs let i = monIndex m return ("",show i,"") today,yesterday,tomorrow :: Parser SmartDate today = string "today" >> return ("","","today") yesterday = string "yesterday" >> return ("","","yesterday") tomorrow = string "tomorrow" >> return ("","","tomorrow") lastthisnextthing :: Parser SmartDate lastthisnextthing = do r <- choice [ string "last" ,string "this" ,string "next" ] many spacenonewline -- make the space optional for easier scripting p <- choice [ string "day" ,string "week" ,string "month" ,string "quarter" ,string "year" ] -- XXX support these in fixSmartDate -- ++ (map string $ months ++ monthabbrevs ++ weekdays ++ weekdayabbrevs) return ("",r,p) -- | -- >>> let p = parsewith (periodexpr (parsedate "2008/11/26")) :: T.Text -> Either (ParseError Char Dec) (Interval, DateSpan) -- >>> p "from aug to oct" -- Right (NoInterval,DateSpan 2008/08/01-2008/09/30) -- >>> p "aug to oct" -- Right (NoInterval,DateSpan 2008/08/01-2008/09/30) -- >>> p "every 3 days in aug" -- Right (Days 3,DateSpan 2008/08) -- >>> p "daily from aug" -- Right (Days 1,DateSpan 2008/08/01-) -- >>> p "every week to 2009" -- Right (Weeks 1,DateSpan -2008/12/31) periodexpr :: Day -> Parser (Interval, DateSpan) periodexpr rdate = choice $ map try [ intervalanddateperiodexpr rdate, intervalperiodexpr, dateperiodexpr rdate, (return (NoInterval,DateSpan Nothing Nothing)) ] intervalanddateperiodexpr :: Day -> Parser (Interval, DateSpan) intervalanddateperiodexpr rdate = do many spacenonewline i <- reportinginterval many spacenonewline s <- periodexprdatespan rdate return (i,s) intervalperiodexpr :: Parser (Interval, DateSpan) intervalperiodexpr = do many spacenonewline i <- reportinginterval return (i, DateSpan Nothing Nothing) dateperiodexpr :: Day -> Parser (Interval, DateSpan) dateperiodexpr rdate = do many spacenonewline s <- periodexprdatespan rdate return (NoInterval, s) -- Parse a reporting interval. reportinginterval :: Parser Interval reportinginterval = choice' [ tryinterval "day" "daily" Days, tryinterval "week" "weekly" Weeks, tryinterval "month" "monthly" Months, tryinterval "quarter" "quarterly" Quarters, tryinterval "year" "yearly" Years, do string "biweekly" return $ Weeks 2, do string "bimonthly" return $ Months 2, do string "every" many spacenonewline n <- fmap read $ some digitChar thsuffix many spacenonewline string "day" many spacenonewline string "of" many spacenonewline string "week" return $ DayOfWeek n, do string "every" many spacenonewline n <- fmap read $ some digitChar thsuffix many spacenonewline string "day" optional $ do many spacenonewline string "of" many spacenonewline string "month" return $ DayOfMonth n ] where thsuffix = choice' $ map string ["st","nd","rd","th"] -- Parse any of several variants of a basic interval, eg "daily", "every day", "every N days". 
tryinterval :: String -> String -> (Int -> Interval) -> Parser Interval tryinterval singular compact intcons = choice' [ do string compact return $ intcons 1, do string "every" many spacenonewline string singular return $ intcons 1, do string "every" many spacenonewline n <- fmap read $ some digitChar many spacenonewline string plural return $ intcons n ] where plural = singular ++ "s" periodexprdatespan :: Day -> Parser DateSpan periodexprdatespan rdate = choice $ map try [ doubledatespan rdate, fromdatespan rdate, todatespan rdate, justdatespan rdate ] doubledatespan :: Day -> Parser DateSpan doubledatespan rdate = do optional (string "from" >> many spacenonewline) b <- smartdate many spacenonewline optional (choice [string "to", string "-"] >> many spacenonewline) e <- smartdate return $ DateSpan (Just $ fixSmartDate rdate b) (Just $ fixSmartDate rdate e) fromdatespan :: Day -> Parser DateSpan fromdatespan rdate = do b <- choice [ do string "from" >> many spacenonewline smartdate , do d <- smartdate string "-" return d ] return $ DateSpan (Just $ fixSmartDate rdate b) Nothing todatespan :: Day -> Parser DateSpan todatespan rdate = do choice [string "to", string "-"] >> many spacenonewline e <- smartdate return $ DateSpan Nothing (Just $ fixSmartDate rdate e) justdatespan :: Day -> Parser DateSpan justdatespan rdate = do optional (string "in" >> many spacenonewline) d <- smartdate return $ spanFromSmartDate rdate d -- | Make a datespan from two valid date strings parseable by parsedate -- (or raise an error). Eg: mkdatespan \"2011/1/1\" \"2011/12/31\". mkdatespan :: String -> String -> DateSpan mkdatespan b = DateSpan (Just $ parsedate b) . Just . parsedate nulldatespan :: DateSpan nulldatespan = DateSpan Nothing Nothing nulldate :: Day nulldate = fromGregorian 0 1 1 hledger-lib-1.2/Hledger/Data/Journal.hs0000644000000000000000000012716513066173044016131 0ustar0000000000000000{-# LANGUAGE Rank2Types #-} {-# LANGUAGE StandaloneDeriving, OverloadedStrings #-} {-| A 'Journal' is a set of transactions, plus optional related data. This is hledger's primary data object. It is usually parsed from a journal file or other data format (see "Hledger.Read"). 
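A Journal is usually produced by the parsers in "Hledger.Read", but it can also
be assembled in code with the helpers below. A minimal sketch (t1 and t2 are
assumed 'Transaction' values; note that 'addTransaction' prepends, and
'journalFinalise' later restores parse order):

> j = addTransaction t2 $ addTransaction t1 nulljournal
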
-} module Hledger.Data.Journal ( -- * Parsing helpers addMarketPrice, addModifierTransaction, addPeriodicTransaction, addTransaction, journalApplyAliases, journalBalanceTransactions, journalApplyCommodityStyles, commodityStylesFromAmounts, journalConvertAmountsToCost, journalFinalise, -- * Filtering filterJournalTransactions, filterJournalPostings, filterJournalAmounts, filterTransactionAmounts, filterTransactionPostings, filterPostingAmount, -- * Querying journalAccountNames, journalAccountNamesUsed, -- journalAmountAndPriceCommodities, journalAmounts, overJournalAmounts, traverseJournalAmounts, -- journalCanonicalCommodities, journalDateSpan, journalDescriptions, journalFilePath, journalFilePaths, journalTransactionAt, journalNextTransaction, journalPrevTransaction, journalPostings, -- * Standard account types journalBalanceSheetAccountQuery, journalProfitAndLossAccountQuery, journalIncomeAccountQuery, journalExpenseAccountQuery, journalAssetAccountQuery, journalLiabilityAccountQuery, journalEquityAccountQuery, journalCashAccountQuery, -- * Misc canonicalStyleFrom, matchpats, nulljournal, journalCheckBalanceAssertions, journalNumberAndTieTransactions, journalUntieTransactions, -- * Tests samplejournal, tests_Hledger_Data_Journal, ) where import Control.Applicative (Const(..)) import Control.Arrow import Control.Monad import Control.Monad.Except import qualified Control.Monad.Reader as R import Control.Monad.ST import Data.Array.ST import Data.Functor.Identity (Identity(..)) import qualified Data.HashTable.ST.Cuckoo as HT import Data.List -- import Data.Map (findWithDefault) import Data.Maybe import Data.Monoid import Data.Ord import Data.Text (Text) import qualified Data.Text as T import Safe (headMay, headDef) import Data.Time.Calendar import Data.Tree import System.Time (ClockTime(TOD)) import Test.HUnit import Text.Printf import qualified Data.Map as M import Hledger.Utils import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount -- import Hledger.Data.Commodity import Hledger.Data.Dates import Hledger.Data.Transaction import Hledger.Data.Posting import Hledger.Query -- try to make Journal ppShow-compatible -- instance Show ClockTime where -- show t = "" -- deriving instance Show Journal instance Show Journal where show j | debugLevel < 3 = printf "Journal %s with %d transactions, %d accounts" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) | debugLevel < 6 = printf "Journal %s with %d transactions, %d accounts: %s" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) (show accounts) | otherwise = printf "Journal %s with %d transactions, %d accounts: %s, commodity styles: %s" (journalFilePath j) (length (jtxns j) + length (jmodifiertxns j) + length (jperiodictxns j)) (length accounts) (show accounts) (show $ jinferredcommodities j) -- ++ (show $ journalTransactions l) where accounts = filter (/= "root") $ flatten $ journalAccountNameTree j -- showJournalDebug j = unlines [ -- show j -- ,show (jtxns j) -- ,show (jmodifiertxns j) -- ,show (jperiodictxns j) -- ,show $ jparsetimeclockentries j -- ,show $ jmarketprices j -- ,show $ jfinalcommentlines j -- ,show $ jparsestate j -- ,show $ map fst $ jfiles j -- ] -- The monoid instance for Journal is useful for two situations. -- -- 1. concatenating finalised journals, eg with multiple -f options: -- FIRST <> SECOND. 
The second's list fields are appended to the -- first's, map fields are combined, transaction counts are summed, -- the parse state of the second is kept. -- -- 2. merging a child parsed journal, eg with the include directive: -- CHILD <> PARENT. A parsed journal's data is in reverse order, so -- this gives what we want. -- instance Monoid Journal where mempty = nulljournal mappend j1 j2 = Journal { jparsedefaultyear = jparsedefaultyear j2 ,jparsedefaultcommodity = jparsedefaultcommodity j2 ,jparseparentaccounts = jparseparentaccounts j2 ,jparsealiases = jparsealiases j2 -- ,jparsetransactioncount = jparsetransactioncount j1 + jparsetransactioncount j2 ,jparsetimeclockentries = jparsetimeclockentries j1 <> jparsetimeclockentries j2 ,jaccounts = jaccounts j1 <> jaccounts j2 ,jcommodities = jcommodities j1 <> jcommodities j2 ,jinferredcommodities = jinferredcommodities j1 <> jinferredcommodities j2 ,jmarketprices = jmarketprices j1 <> jmarketprices j2 ,jmodifiertxns = jmodifiertxns j1 <> jmodifiertxns j2 ,jperiodictxns = jperiodictxns j1 <> jperiodictxns j2 ,jtxns = jtxns j1 <> jtxns j2 ,jfinalcommentlines = jfinalcommentlines j2 ,jfiles = jfiles j1 <> jfiles j2 ,jlastreadtime = max (jlastreadtime j1) (jlastreadtime j2) } nulljournal :: Journal nulljournal = Journal { jparsedefaultyear = Nothing ,jparsedefaultcommodity = Nothing ,jparseparentaccounts = [] ,jparsealiases = [] -- ,jparsetransactioncount = 0 ,jparsetimeclockentries = [] ,jaccounts = [] ,jcommodities = M.fromList [] ,jinferredcommodities = M.fromList [] ,jmarketprices = [] ,jmodifiertxns = [] ,jperiodictxns = [] ,jtxns = [] ,jfinalcommentlines = "" ,jfiles = [] ,jlastreadtime = TOD 0 0 } journalFilePath :: Journal -> FilePath journalFilePath = fst . mainfile journalFilePaths :: Journal -> [FilePath] journalFilePaths = map fst . jfiles mainfile :: Journal -> (FilePath, Text) mainfile = headDef ("", "") . jfiles addTransaction :: Transaction -> Journal -> Journal addTransaction t j = j { jtxns = t : jtxns j } addModifierTransaction :: ModifierTransaction -> Journal -> Journal addModifierTransaction mt j = j { jmodifiertxns = mt : jmodifiertxns j } addPeriodicTransaction :: PeriodicTransaction -> Journal -> Journal addPeriodicTransaction pt j = j { jperiodictxns = pt : jperiodictxns j } addMarketPrice :: MarketPrice -> Journal -> Journal addMarketPrice h j = j { jmarketprices = h : jmarketprices j } -- | Get the transaction with this index (its 1-based position in the input stream), if any. journalTransactionAt :: Journal -> Integer -> Maybe Transaction journalTransactionAt Journal{jtxns=ts} i = -- it's probably ts !! (i+1), but we won't assume headMay [t | t <- ts, tindex t == i] -- | Get the transaction that appeared immediately after this one in the input stream, if any. journalNextTransaction :: Journal -> Transaction -> Maybe Transaction journalNextTransaction j t = journalTransactionAt j (tindex t + 1) -- | Get the transaction that appeared immediately before this one in the input stream, if any. journalPrevTransaction :: Journal -> Transaction -> Maybe Transaction journalPrevTransaction j t = journalTransactionAt j (tindex t - 1) -- | Unique transaction descriptions used in this journal. journalDescriptions :: Journal -> [Text] journalDescriptions = nub . sort . map tdescription . jtxns -- | All postings from this journal's transactions, in order. journalPostings :: Journal -> [Posting] journalPostings = concatMap tpostings . jtxns -- | Unique account names posted to in this journal. 
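-- An illustrative sketch (an assumed helper, not part of the API): walking
-- forward through the remaining transactions in input order, using the
-- tindex-based lookups above:
--
-- followingTxns :: Journal -> Transaction -> [Transaction]
-- followingTxns j t = case journalNextTransaction j t of
--                       Nothing -> []
--                       Just t' -> t' : followingTxns j t'
--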
journalAccountNamesUsed :: Journal -> [AccountName] journalAccountNamesUsed = sort . accountNamesFromPostings . journalPostings -- | Unique account names in this journal, including parent accounts containing no postings. journalAccountNames :: Journal -> [AccountName] journalAccountNames = sort . expandAccountNames . journalAccountNamesUsed journalAccountNameTree :: Journal -> Tree AccountName journalAccountNameTree = accountNameTreeFrom . journalAccountNames -- standard account types -- | A query for Profit & Loss accounts in this journal. -- Cf . journalProfitAndLossAccountQuery :: Journal -> Query journalProfitAndLossAccountQuery j = Or [journalIncomeAccountQuery j ,journalExpenseAccountQuery j ] -- | A query for Income (Revenue) accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^(income|revenue)s?(:|$)@. journalIncomeAccountQuery :: Journal -> Query journalIncomeAccountQuery _ = Acct "^(income|revenue)s?(:|$)" -- | A query for Expense accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^expenses?(:|$)@. journalExpenseAccountQuery :: Journal -> Query journalExpenseAccountQuery _ = Acct "^expenses?(:|$)" -- | A query for Asset, Liability & Equity accounts in this journal. -- Cf . journalBalanceSheetAccountQuery :: Journal -> Query journalBalanceSheetAccountQuery j = Or [journalAssetAccountQuery j ,journalLiabilityAccountQuery j ,journalEquityAccountQuery j ] -- | A query for Asset accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^assets?(:|$)@. journalAssetAccountQuery :: Journal -> Query journalAssetAccountQuery _ = Acct "^assets?(:|$)" -- | A query for Liability accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^(debts?|liabilit(y|ies))(:|$)@. journalLiabilityAccountQuery :: Journal -> Query journalLiabilityAccountQuery _ = Acct "^(debts?|liabilit(y|ies))(:|$)" -- | A query for Equity accounts in this journal. -- This is currently hard-coded to the case-insensitive regex @^equity(:|$)@. journalEquityAccountQuery :: Journal -> Query journalEquityAccountQuery _ = Acct "^equity(:|$)" -- | A query for Cash (-equivalent) accounts in this journal (ie, -- accounts which appear on the cashflow statement.) This is currently -- hard-coded to be all the Asset accounts except for those containing the -- case-insensitive regex @(receivable|A/R)@. journalCashAccountQuery :: Journal -> Query journalCashAccountQuery j = And [journalAssetAccountQuery j, Not $ Acct "(receivable|A/R)"] -- Various kinds of filtering on journals. We do it differently depending -- on the command. ------------------------------------------------------------------------------- -- filtering V2 -- | Keep only transactions matching the query expression. filterJournalTransactions :: Query -> Journal -> Journal filterJournalTransactions q j@Journal{jtxns=ts} = j{jtxns=filter (q `matchesTransaction`) ts} -- | Keep only postings matching the query expression. -- This can leave unbalanced transactions. filterJournalPostings :: Query -> Journal -> Journal filterJournalPostings q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionPostings q) ts} -- | Within each posting's amount, keep only the parts matching the query. -- This can leave unbalanced transactions. 
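-- An illustrative sketch (an assumed helper, not part of the API): restrict a
-- journal to income and expense postings using the standard account queries
-- above. As noted for filterJournalPostings, this can leave transactions
-- unbalanced.
--
-- profitAndLossJournal :: Journal -> Journal
-- profitAndLossJournal j =
--   filterJournalPostings (journalProfitAndLossAccountQuery j) j
--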
filterJournalAmounts :: Query -> Journal -> Journal filterJournalAmounts q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionAmounts q) ts} -- | Filter out all parts of this transaction's amounts which do not match the query. -- This can leave the transaction unbalanced. filterTransactionAmounts :: Query -> Transaction -> Transaction filterTransactionAmounts q t@Transaction{tpostings=ps} = t{tpostings=map (filterPostingAmount q) ps} -- | Filter out all parts of this posting's amount which do not match the query. filterPostingAmount :: Query -> Posting -> Posting filterPostingAmount q p@Posting{pamount=Mixed as} = p{pamount=Mixed $ filter (q `matchesAmount`) as} filterTransactionPostings :: Query -> Transaction -> Transaction filterTransactionPostings q t@Transaction{tpostings=ps} = t{tpostings=filter (q `matchesPosting`) ps} {- ------------------------------------------------------------------------------- -- filtering V1 -- | Keep only transactions we are interested in, as described by the -- filter specification. filterJournalTransactions :: FilterSpec -> Journal -> Journal filterJournalTransactions FilterSpec{datespan=datespan ,cleared=cleared -- ,real=real -- ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalTransactionsByClearedStatus cleared . filterJournalPostingsByDepth depth . filterJournalTransactionsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . filterJournalTransactionsByDate datespan -- | Keep only postings we are interested in, as described by the filter -- specification. This can leave unbalanced transactions. filterJournalPostings :: FilterSpec -> Journal -> Journal filterJournalPostings FilterSpec{datespan=datespan ,cleared=cleared ,real=real ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalPostingsByRealness real . filterJournalPostingsByClearedStatus cleared . filterJournalPostingsByEmpty empty . filterJournalPostingsByDepth depth . filterJournalPostingsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . filterJournalTransactionsByDate datespan -- | Keep only transactions whose metadata matches all metadata specifications. filterJournalTransactionsByMetadata :: [(String,String)] -> Journal -> Journal filterJournalTransactionsByMetadata pats j@Journal{jtxns=ts} = j{jtxns=filter matchmd ts} where matchmd t = all (`elem` tmetadata t) pats -- | Keep only transactions whose description matches the description patterns. filterJournalTransactionsByDescription :: [String] -> Journal -> Journal filterJournalTransactionsByDescription pats j@Journal{jtxns=ts} = j{jtxns=filter matchdesc ts} where matchdesc = matchpats pats . tdescription -- | Keep only transactions which fall between begin and end dates. -- We include transactions on the begin date and exclude transactions on the end -- date, like ledger. An empty date string means no restriction. filterJournalTransactionsByDate :: DateSpan -> Journal -> Journal filterJournalTransactionsByDate (DateSpan begin end) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match t = maybe True (tdate t>=) begin && maybe True (tdate t<) end -- | Keep only transactions which have the requested cleared/uncleared -- status, if there is one. 
filterJournalTransactionsByClearedStatus :: Maybe Bool -> Journal -> Journal filterJournalTransactionsByClearedStatus Nothing j = j filterJournalTransactionsByClearedStatus (Just val) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match = (==val).tstatus -- | Keep only postings which have the requested cleared/uncleared status, -- if there is one. filterJournalPostingsByClearedStatus :: Maybe Bool -> Journal -> Journal filterJournalPostingsByClearedStatus Nothing j = j filterJournalPostingsByClearedStatus (Just c) j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter ((==c) . postingCleared) ps} -- | Strip out any virtual postings, if the flag is true, otherwise do -- no filtering. filterJournalPostingsByRealness :: Bool -> Journal -> Journal filterJournalPostingsByRealness False j = j filterJournalPostingsByRealness True j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter isReal ps} -- | Strip out any postings with zero amount, unless the flag is true. filterJournalPostingsByEmpty :: Bool -> Journal -> Journal filterJournalPostingsByEmpty True j = j filterJournalPostingsByEmpty False j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (not . isEmptyPosting) ps} -- -- | Keep only transactions which affect accounts deeper than the specified depth. -- filterJournalTransactionsByDepth :: Maybe Int -> Journal -> Journal -- filterJournalTransactionsByDepth Nothing j = j -- filterJournalTransactionsByDepth (Just d) j@Journal{jtxns=ts} = -- j{jtxns=(filter (any ((<= d+1) . accountNameLevel . paccount) . tpostings) ts)} -- | Strip out any postings to accounts deeper than the specified depth -- (and any transactions which have no postings as a result). filterJournalPostingsByDepth :: Maybe Int -> Journal -> Journal filterJournalPostingsByDepth Nothing j = j filterJournalPostingsByDepth (Just d) j@Journal{jtxns=ts} = j{jtxns=filter (not . null . tpostings) $ map filtertxns ts} where filtertxns t@Transaction{tpostings=ps} = t{tpostings=filter ((<= d) . accountNameLevel . paccount) ps} -- | Keep only postings which affect accounts matched by the account patterns. -- This can leave transactions unbalanced. filterJournalPostingsByAccount :: [String] -> Journal -> Journal filterJournalPostingsByAccount apats j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (matchpats apats . paccount) ps} -- | Keep only transactions which affect accounts matched by the account patterns. -- More precisely: each positive account pattern excludes transactions -- which do not contain a posting to a matched account, and each negative -- account pattern excludes transactions containing a posting to a matched -- account. filterJournalTransactionsByAccount :: [String] -> Journal -> Journal filterJournalTransactionsByAccount apats j@Journal{jtxns=ts} = j{jtxns=filter tmatch ts} where tmatch t = (null positives || any positivepmatch ps) && (null negatives || not (any negativepmatch ps)) where ps = tpostings t positivepmatch p = any (`amatch` a) positives where a = paccount p negativepmatch p = any (`amatch` a) negatives where a = paccount p amatch pat a = regexMatchesCI (abspat pat) a (negatives,positives) = partition isnegativepat apats -} -- | Apply additional account aliases (eg from the command-line) to all postings in a journal. 
journalApplyAliases :: [AccountAlias] -> Journal -> Journal journalApplyAliases aliases j@Journal{jtxns=ts} = -- (if null aliases -- then id -- else (dbgtrace $ -- "applying additional command-line aliases:\n" -- ++ chomp (unlines $ map (" "++) $ lines $ ppShow aliases))) $ j{jtxns=map dotransaction ts} where dotransaction t@Transaction{tpostings=ps} = t{tpostings=map doposting ps} doposting p@Posting{paccount=a} = p{paccount= accountNameApplyAliases aliases a} -- | Do post-parse processing on a parsed journal to make it ready for -- use. Reverse parsed data to normal order, canonicalise amount -- formats, check/ensure that transactions are balanced, and maybe -- check balance assertions. journalFinalise :: ClockTime -> FilePath -> Text -> Bool -> ParsedJournal -> Either String Journal journalFinalise t path txt assrt j@Journal{jfiles=fs} = do (journalTieTransactions <$> (journalBalanceTransactions assrt $ journalApplyCommodityStyles $ j{ jfiles = (path,txt) : reverse fs , jlastreadtime = t , jtxns = reverse $ jtxns j -- NOTE: see addTransaction , jmodifiertxns = reverse $ jmodifiertxns j -- NOTE: see addModifierTransaction , jperiodictxns = reverse $ jperiodictxns j -- NOTE: see addPeriodicTransaction , jmarketprices = reverse $ jmarketprices j -- NOTE: see addMarketPrice })) journalNumberAndTieTransactions = journalTieTransactions . journalNumberTransactions -- | Number (set the tindex field) this journal's transactions, counting upward from 1. journalNumberTransactions :: Journal -> Journal journalNumberTransactions j@Journal{jtxns=ts} = j{jtxns=map (\(i,t) -> t{tindex=i}) $ zip [1..] ts} -- | Tie the knot in all of this journal's transactions, ensuring their postings -- refer to them. This should be done last, after any other transaction-modifying operations. journalTieTransactions :: Journal -> Journal journalTieTransactions j@Journal{jtxns=ts} = j{jtxns=map txnTieKnot ts} -- | Untie all transaction-posting knots in this journal, so that eg -- recursiveSize and GHCI's :sprint can work on it. journalUntieTransactions :: Transaction -> Transaction journalUntieTransactions t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Check any balance assertions in the journal and return an error -- message if any of them fail. journalCheckBalanceAssertions :: Journal -> Either String Journal journalCheckBalanceAssertions j = runST $ journalBalanceTransactionsST True j (return ()) (\_ _ -> return ()) (const $ return j) -- noops -- | Check a posting's balance assertion and return an error if it -- fails. checkBalanceAssertion :: Posting -> MixedAmount -> Either String () checkBalanceAssertion p@Posting{ pbalanceassertion = Just ass} amt | isReallyZeroAmount diff = Right () | True = Left err where assertedcomm = acommodity ass actualbal = fromMaybe nullamt $ find ((== assertedcomm) . 
acommodity) (amounts amt) diff = ass - actualbal diffplus | isNegativeAmount diff == False = "+" | otherwise = "" err = printf (unlines [ "balance assertion error%s", "after posting:", "%s", "balance assertion details:", "date: %s", "account: %s", "commodity: %s", "calculated: %s", "asserted: %s (difference: %s)" ]) (case ptransaction p of Nothing -> ":" -- shouldn't happen Just t -> printf " in %s:\nin transaction:\n%s" (showGenericSourcePos $ tsourcepos t) (chomp $ show t) :: String) (showPostingLine p) (showDate $ postingDate p) (T.unpack $ paccount p) -- XXX pack assertedcomm (showAmount actualbal) (showAmount ass) (diffplus ++ showAmount diff) checkBalanceAssertion _ _ = Right () -- | Environment for 'CurrentBalancesModifier' data Env s = Env { eBalances :: HT.HashTable s AccountName MixedAmount , eStoreTx :: Transaction -> ST s () , eAssrt :: Bool , eStyles :: Maybe (M.Map CommoditySymbol AmountStyle) } -- | Monad transformer stack with a reference to a mutable hashtable -- of current account balances and a mutable array of finished -- transactions in original parsing order. type CurrentBalancesModifier s = R.ReaderT (Env s) (ExceptT String (ST s)) -- | Fill in any missing amounts and check that all journal transactions -- balance, or return an error message. This is done after parsing all -- amounts and applying canonical commodity styles, since balancing -- depends on display precision. Reports only the first error encountered. journalBalanceTransactions :: Bool -> Journal -> Either String Journal journalBalanceTransactions assrt j = runST $ journalBalanceTransactionsST assrt (journalNumberTransactions j) (newArray_ (1, genericLength $ jtxns j) :: forall s. ST s (STArray s Integer Transaction)) (\arr tx -> writeArray arr (tindex tx) tx) $ fmap (\txns -> j{ jtxns = txns}) . getElems -- | Generalization used in the definition of -- 'journalBalanceTransactionsST and 'journalCheckBalanceAssertions' journalBalanceTransactionsST :: Bool -> Journal -> ST s txns -- ^ creates transaction store -> (txns -> Transaction -> ST s ()) -- ^ "store" operation -> (txns -> ST s a) -- ^ calculate result from transactions -> ST s (Either String a) journalBalanceTransactionsST assrt j createStore storeIn extract = runExceptT $ do bals <- lift $ HT.newSized size txStore <- lift $ createStore flip R.runReaderT (Env bals (storeIn txStore) assrt $ Just $ jinferredcommodities j) $ do dated <- fmap snd . sortBy (comparing fst) . concat <$> mapM' discriminateByDate (jtxns j) mapM' checkInferAndRegisterAmounts dated lift $ extract txStore where size = genericLength $ journalPostings j -- | This converts a transaction into a list of objects whose dates -- have to be considered when checking balance assertions and handled -- by 'checkInferAndRegisterAmounts'. -- -- Transaction without balance assignments can be balanced and stored -- immediately and their (possibly) dated postings are returned. -- -- Transaction with balance assignments are only supported if no -- posting has a 'pdate' value. Supported transactions will be -- returned unchanged and balanced and stored later in 'checkInferAndRegisterAmounts'. discriminateByDate :: Transaction -> CurrentBalancesModifier s [(Day, Either Posting Transaction)] discriminateByDate tx | null (assignmentPostings tx) = do styles <- R.reader $ eStyles balanced <- lift $ ExceptT $ return $ balanceTransaction styles tx storeTransaction balanced return $ fmap (postingDate &&& (Left . removePrices)) $ tpostings $ balanced | True = do when (any (isJust . 
pdate) $ tpostings tx) $
        throwError $ unlines $
          ["Not supported: Transactions with balance assignments "
          ,"AND dated postings without amount:\n"
          , showTransaction tx]
      return [(tdate tx, Right $ tx { tpostings = removePrices <$> tpostings tx })]

-- | This function takes different objects describing changes to
-- account balances on a single day. It can handle either a single
-- posting (from an already balanced transaction without assignments)
-- or a whole transaction with assignments (which is required to have
-- no posting with a pdate set).
--
-- For a single posting, there is not much to do. Only add its amount
-- to its account and check the assertion, if there is one. This
-- functionality is provided by 'addAmountAndCheckBalance'.
--
-- For a whole transaction, it loops over all postings, and performs
-- 'addAmountAndCheckBalance', if there is an amount. If there is no
-- amount, the amount is inferred from the assertion, or left empty if
-- there is no assertion. Then the transaction is balanced, the
-- inferred amount added to the balance (all in
-- 'balanceTransactionUpdate') and the resulting transaction with no
-- missing amounts is stored in the array, for later retrieval.
--
-- Again, in short:
--
-- 'Left Posting': Check the balance assertion and update the
-- account balance. If the amount is empty, do nothing. This can be
-- the case e.g. for virtual postings.
--
-- 'Right Transaction': Loop over all postings, infer their amounts
-- and then balance and store the transaction.
checkInferAndRegisterAmounts :: Either Posting Transaction
                             -> CurrentBalancesModifier s ()
checkInferAndRegisterAmounts (Left p) =
  void $ addAmountAndCheckBalance return p
checkInferAndRegisterAmounts (Right oldTx) = do
  let ps = tpostings oldTx
  styles <- R.reader $ eStyles
  newPostings <- forM ps $ addAmountAndCheckBalance inferFromAssignment
  storeTransaction =<< balanceTransactionUpdate
    (fmap void . addToBalance) styles oldTx { tpostings = newPostings }
  where
    inferFromAssignment :: Posting -> CurrentBalancesModifier s Posting
    inferFromAssignment p =
      maybe (return p)
            (fmap (\a -> p { pamount = a, porigin = Just $ originalPosting p })
              . setBalance (paccount p))
            $ pbalanceassertion p

-- | Adds a posting's amount to the posting's account balance and
-- checks a possible balance assertion. If there is no amount, it runs
-- the supplied fallback action.
addAmountAndCheckBalance
  :: (Posting -> CurrentBalancesModifier s Posting)
     -- ^ action to execute, if posting has no amount
  -> Posting
  -> CurrentBalancesModifier s Posting
addAmountAndCheckBalance _ p | hasAmount p = do
  newAmt <- addToBalance (paccount p) $ pamount p
  assrt <- R.reader eAssrt
  lift $ when assrt $ ExceptT $ return $ checkBalanceAssertion p newAmt
  return p
addAmountAndCheckBalance fallback p = fallback p

-- | Sets an account's balance to a given amount and returns the
-- difference of new and old amount.
setBalance :: AccountName -> Amount -> CurrentBalancesModifier s MixedAmount
setBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do
  old <- HT.lookup bals acc
  let new = Mixed $ (amt :) $ maybe [] (filter ((/= acommodity amt) . acommodity) .
amounts) old HT.insert bals acc new return $ maybe new (new -) old -- | Adds an amount to an account's balance and returns the resulting -- balance addToBalance :: AccountName -> MixedAmount -> CurrentBalancesModifier s MixedAmount addToBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do new <- maybe amt (+ amt) <$> HT.lookup bals acc HT.insert bals acc new return new -- | Stores a transaction in the transaction array in original parsing -- order. storeTransaction :: Transaction -> CurrentBalancesModifier s () storeTransaction tx = liftModifier $ ($tx) . eStoreTx -- | Helper function liftModifier :: (Env s -> ST s a) -> CurrentBalancesModifier s a liftModifier f = R.ask >>= lift . lift . f -- | Choose and apply a consistent display format to the posting -- amounts in each commodity. Each commodity's format is specified by -- a commodity format directive, or otherwise inferred from posting -- amounts as in hledger < 0.28. journalApplyCommodityStyles :: Journal -> Journal journalApplyCommodityStyles j@Journal{jtxns=ts, jmarketprices=mps} = j'' where j' = journalInferCommodityStyles j j'' = j'{jtxns=map fixtransaction ts, jmarketprices=map fixmarketprice mps} fixtransaction t@Transaction{tpostings=ps} = t{tpostings=map fixposting ps} fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} fixmarketprice mp@MarketPrice{mpamount=a} = mp{mpamount=fixamount a} fixmixedamount (Mixed as) = Mixed $ map fixamount as fixamount a@Amount{acommodity=c} = a{astyle=journalCommodityStyle j' c} -- | Get this journal's standard display style for the given -- commodity. That is the style defined by the last corresponding -- commodity format directive if any, otherwise the style inferred -- from the posting amounts (or in some cases, price amounts) in this -- commodity if any, otherwise the default style. journalCommodityStyle :: Journal -> CommoditySymbol -> AmountStyle journalCommodityStyle j c = headDef amountstyle{asprecision=2} $ catMaybes [ M.lookup c (jcommodities j) >>= cformat ,M.lookup c $ jinferredcommodities j ] -- | Infer a display format for each commodity based on the amounts parsed. -- "hledger... will use the format of the first posting amount in the -- commodity, and the highest precision of all posting amounts in the commodity." journalInferCommodityStyles :: Journal -> Journal journalInferCommodityStyles j = j{jinferredcommodities = commodityStylesFromAmounts $ dbg8 "journalChooseCommmodityStyles using amounts" $ journalAmounts j} -- | Given a list of amounts in parse order, build a map from their commodity names -- to standard commodity display formats. commodityStylesFromAmounts :: [Amount] -> M.Map CommoditySymbol AmountStyle commodityStylesFromAmounts amts = M.fromList commstyles where samecomm = \a1 a2 -> acommodity a1 == acommodity a2 commamts = [(acommodity $ head as, as) | as <- groupBy samecomm $ sortBy (comparing acommodity) amts] commstyles = [(c, canonicalStyleFrom $ map astyle as) | (c,as) <- commamts] -- | Given an ordered list of amount styles, choose a canonical style. -- That is: the style of the first, and the -- maximum precision of all. canonicalStyleFrom :: [AmountStyle] -> AmountStyle canonicalStyleFrom [] = amountstyle canonicalStyleFrom ss@(first:_) = first{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} where mgrps = maybe Nothing Just $ headMay $ catMaybes $ map asdigitgroups ss -- precision is maximum of all precisions prec = maximumStrict $ map asprecision ss mdec = Just $ headDef '.' 
$ catMaybes $ map asdecimalpoint ss -- precision is that of first amount with a decimal point -- (mdec, prec) = -- case filter (isJust . asdecimalpoint) ss of -- (s:_) -> (asdecimalpoint s, asprecision s) -- [] -> (Just '.', 0) -- -- | Apply this journal's historical price records to unpriced amounts where possible. -- journalApplyMarketPrices :: Journal -> Journal -- journalApplyMarketPrices j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts} -- where -- fixtransaction t@Transaction{tdate=d, tpostings=ps} = t{tpostings=map fixposting ps} -- where -- fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} -- fixmixedamount (Mixed as) = Mixed $ map fixamount as -- fixamount = fixprice -- fixprice a@Amount{price=Just _} = a -- fixprice a@Amount{commodity=c} = a{price=maybe Nothing (Just . UnitPrice) $ journalMarketPriceFor j d c} -- -- | Get the price for a commodity on the specified day from the price database, if known. -- -- Does only one lookup step, ie will not look up the price of a price. -- journalMarketPriceFor :: Journal -> Day -> CommoditySymbol -> Maybe MixedAmount -- journalMarketPriceFor j d CommoditySymbol{symbol=s} = do -- let ps = reverse $ filter ((<= d).mpdate) $ filter ((s==).hsymbol) $ sortBy (comparing mpdate) $ jmarketprices j -- case ps of (MarketPrice{mpamount=a}:_) -> Just a -- _ -> Nothing -- | Convert all this journal's amounts to cost by applying their prices, if any. journalConvertAmountsToCost :: Journal -> Journal journalConvertAmountsToCost j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts} where -- similar to journalApplyCommodityStyles fixtransaction t@Transaction{tpostings=ps} = t{tpostings=map fixposting ps} fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} fixmixedamount (Mixed as) = Mixed $ map fixamount as fixamount = canonicaliseAmount (jinferredcommodities j) . costOfAmount -- -- | Get this journal's unique, display-preference-canonicalised commodities, by symbol. -- journalCanonicalCommodities :: Journal -> M.Map String CommoditySymbol -- journalCanonicalCommodities j = canonicaliseCommodities $ journalAmountCommodities j -- -- | Get all this journal's amounts' commodities, in the order parsed. -- journalAmountCommodities :: Journal -> [CommoditySymbol] -- journalAmountCommodities = map acommodity . concatMap amounts . journalAmounts -- -- | Get all this journal's amount and price commodities, in the order parsed. -- journalAmountAndPriceCommodities :: Journal -> [CommoditySymbol] -- journalAmountAndPriceCommodities = concatMap amountCommodities . concatMap amounts . journalAmounts -- -- | Get this amount's commodity and any commodities referenced in its price. -- amountCommodities :: Amount -> [CommoditySymbol] -- amountCommodities Amount{acommodity=c,aprice=p} = -- case p of Nothing -> [c] -- Just (UnitPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- Just (TotalPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- | Get an ordered list of the amounts in this journal which will -- influence amount style canonicalisation. These are: -- -- * amounts in market price directives (in parse order) -- * amounts in postings (in parse order) -- -- Amounts in default commodity directives also influence -- canonicalisation, but earlier, as amounts are parsed. -- Amounts in posting prices are not used for canonicalisation. -- journalAmounts :: Journal -> [Amount] journalAmounts = getConst . traverseJournalAmounts (Const . 
(:[])) -- | Maps over all of the amounts in the journal overJournalAmounts :: (Amount -> Amount) -> Journal -> Journal overJournalAmounts f = runIdentity . traverseJournalAmounts (Identity . f) -- | Traverses over all ofthe amounts in the journal, in the order -- indicated by 'journalAmounts'. traverseJournalAmounts :: Applicative f => (Amount -> f Amount) -> Journal -> f Journal traverseJournalAmounts f j = recombine <$> (traverse . mpa) f (jmarketprices j) <*> (traverse . tp . traverse . pamt . maa . traverse) f (jtxns j) where recombine mps txns = j { jmarketprices = mps, jtxns = txns } -- a bunch of traversals mpa g mp = (\amt -> mp { mpamount = amt }) <$> g (mpamount mp) tp g t = (\ps -> t { tpostings = ps }) <$> g (tpostings t) pamt g p = (\amt -> p { pamount = amt }) <$> g (pamount p) maa g (Mixed as) = Mixed <$> g as -- | The fully specified date span enclosing the dates (primary or secondary) -- of all this journal's transactions and postings, or DateSpan Nothing Nothing -- if there are none. journalDateSpan :: Bool -> Journal -> DateSpan journalDateSpan secondary j | null ts = DateSpan Nothing Nothing | otherwise = DateSpan (Just earliest) (Just $ addDays 1 latest) where earliest = minimumStrict dates latest = maximumStrict dates dates = pdates ++ tdates tdates = map (if secondary then transactionDate2 else tdate) ts pdates = concatMap (catMaybes . map (if secondary then (Just . postingDate2) else pdate) . tpostings) ts ts = jtxns j -- #ifdef TESTS test_journalDateSpan = do "journalDateSpan" ~: do assertEqual "" (DateSpan (Just $ fromGregorian 2014 1 10) (Just $ fromGregorian 2014 10 11)) (journalDateSpan True j) where j = nulljournal{jtxns = [nulltransaction{tdate = parsedate "2014/02/01" ,tpostings = [posting{pdate=Just (parsedate "2014/01/10")}] } ,nulltransaction{tdate = parsedate "2014/09/01" ,tpostings = [posting{pdate2=Just (parsedate "2014/10/10")}] } ]} -- #endif -- Misc helpers -- | Check if a set of hledger account/description filter patterns matches the -- given account name or entry description. Patterns are case-insensitive -- regular expressions. Prefixed with not:, they become anti-patterns. matchpats :: [String] -> String -> Bool matchpats pats str = (null positives || any match positives) && (null negatives || not (any match negatives)) where (negatives,positives) = partition isnegativepat pats match "" = True match pat = regexMatchesCI (abspat pat) str negateprefix = "not:" isnegativepat = (negateprefix `isPrefixOf`) abspat pat = if isnegativepat pat then drop (length negateprefix) pat else pat -- debug helpers -- traceAmountPrecision a = trace (show $ map (precision . acommodity) $ amounts a) a -- tracePostingsCommodities ps = trace (show $ map ((map (precision . acommodity) . amounts) . 
pamount) ps) ps -- tests -- A sample journal for testing, similar to examples/sample.journal: -- -- 2008/01/01 income -- assets:bank:checking $1 -- income:salary -- -- 2008/06/01 gift -- assets:bank:checking $1 -- income:gifts -- -- 2008/06/02 save -- assets:bank:saving $1 -- assets:bank:checking -- -- 2008/06/03 * eat & shop -- expenses:food $1 -- expenses:supplies $1 -- assets:cash -- -- 2008/12/31 * pay off -- liabilities:debts $1 -- assets:bank:checking -- Right samplejournal = journalBalanceTransactions False $ nulljournal {jtxns = [ txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/01/01", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:salary" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/01", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="gift", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:gifts" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/02", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="save", tcomment="", ttags=[], tpostings= ["assets:bank:saving" `post` usd 1 ,"assets:bank:checking" `post` usd (-1) ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/06/03", tdate2=Nothing, tstatus=Cleared, tcode="", tdescription="eat & shop", tcomment="", ttags=[], tpostings=["expenses:food" `post` usd 1 ,"expenses:supplies" `post` usd 1 ,"assets:cash" `post` missingamt ], tpreceding_comment_lines="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/12/31", tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="pay off", tcomment="", ttags=[], tpostings=["liabilities:debts" `post` usd 1 ,"assets:bank:checking" `post` usd (-1) ], tpreceding_comment_lines="" } ] } tests_Hledger_Data_Journal = TestList $ [ test_journalDateSpan -- "query standard account types" ~: -- do -- let j = journal1 -- journalBalanceSheetAccountNames j `is` ["assets","assets:a","equity","equity:q","equity:q:qq","liabilities","liabilities:l"] -- journalProfitAndLossAccountNames j `is` ["expenses","expenses:e","income","income:i"] ] hledger-lib-1.2/Hledger/Data/Ledger.hs0000644000000000000000000000725013035210046015677 0ustar0000000000000000{-| A 'Ledger' is derived from a 'Journal' by applying a filter specification to select 'Transaction's and 'Posting's of interest. It contains the filtered journal and knows the resulting chart of accounts, account balances, and postings in each account. 
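For example (an illustrative sketch; 'Any' is the match-everything query from
Hledger.Query, and samplejournal comes from Hledger.Data.Journal):

> l = ledgerFromJournal Any samplejournal
> ledgerAccountNames l    -- the derived chart of accounts
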
-} module Hledger.Data.Ledger where import qualified Data.Map as M -- import Data.Text (Text) import qualified Data.Text as T import Safe (headDef) import Test.HUnit import Text.Printf import Hledger.Data.Types import Hledger.Data.Account import Hledger.Data.Journal import Hledger.Data.Posting import Hledger.Query instance Show Ledger where show l = printf "Ledger with %d transactions, %d accounts\n" --"%s" (length (jtxns $ ljournal l) + length (jmodifiertxns $ ljournal l) + length (jperiodictxns $ ljournal l)) (length $ ledgerAccountNames l) -- (showtree $ ledgerAccountNameTree l) nullledger :: Ledger nullledger = Ledger { ljournal = nulljournal, laccounts = [] } -- | Filter a journal's transactions with the given query, then derive -- a ledger containing the chart of accounts and balances. If the -- query includes a depth limit, that will affect the ledger's -- journal but not the ledger's account tree. ledgerFromJournal :: Query -> Journal -> Ledger ledgerFromJournal q j = nullledger{ljournal=j'', laccounts=as} where (q',depthq) = (filterQuery (not . queryIsDepth) q, filterQuery queryIsDepth q) j' = filterJournalAmounts (filterQuery queryIsSym q) $ -- remove amount parts which the query's sym: terms would exclude filterJournalPostings q' j as = accountsFromPostings $ journalPostings j' j'' = filterJournalPostings depthq j' -- | List a ledger's account names. ledgerAccountNames :: Ledger -> [AccountName] ledgerAccountNames = drop 1 . map aname . laccounts -- | Get the named account from a ledger. ledgerAccount :: Ledger -> AccountName -> Maybe Account ledgerAccount l a = lookupAccount a $ laccounts l -- | Get this ledger's root account, which is a dummy "root" account -- above all others. This should always be first in the account list, -- if somehow not this returns a null account. ledgerRootAccount :: Ledger -> Account ledgerRootAccount = headDef nullacct . laccounts -- | List a ledger's top-level accounts (the ones below the root), in tree order. ledgerTopAccounts :: Ledger -> [Account] ledgerTopAccounts = asubs . head . laccounts -- | List a ledger's bottom-level (subaccount-less) accounts, in tree order. ledgerLeafAccounts :: Ledger -> [Account] ledgerLeafAccounts = filter (null.asubs) . laccounts -- | Accounts in ledger whose name matches the pattern, in tree order. ledgerAccountsMatching :: [String] -> Ledger -> [Account] ledgerAccountsMatching pats = filter (matchpats pats . T.unpack . aname) . laccounts -- XXX pack -- | List a ledger's postings, in the order parsed. ledgerPostings :: Ledger -> [Posting] ledgerPostings = journalPostings . ljournal -- | The (fully specified) date span containing all the ledger's (filtered) transactions, -- or DateSpan Nothing Nothing if there are none. ledgerDateSpan :: Ledger -> DateSpan ledgerDateSpan = postingsDateSpan . ledgerPostings -- | All commodities used in this ledger. ledgerCommodities :: Ledger -> [CommoditySymbol] ledgerCommodities = M.keys . jinferredcommodities . ljournal tests_ledgerFromJournal = [ "ledgerFromJournal" ~: do assertEqual "" (0) (length $ ledgerPostings $ ledgerFromJournal Any nulljournal) assertEqual "" (11) (length $ ledgerPostings $ ledgerFromJournal Any samplejournal) assertEqual "" (6) (length $ ledgerPostings $ ledgerFromJournal (Depth 2) samplejournal) ] tests_Hledger_Data_Ledger = TestList $ tests_ledgerFromJournal hledger-lib-1.2/Hledger/Data/MarketPrice.hs0000644000000000000000000000160313066173044016711 0ustar0000000000000000{-| A 'MarketPrice' represents a historical exchange rate between two commodities. 
(Ledger calls them historical prices.) For example, prices published by a stock exchange or the foreign exchange market. Some commands (balance, currently) can use this information to show the market value of things at a given date. -} {-# LANGUAGE OverloadedStrings, LambdaCase #-} module Hledger.Data.MarketPrice where import qualified Data.Text as T import Test.HUnit import Hledger.Data.Amount import Hledger.Data.Dates import Hledger.Data.Types -- | Get the string representation of an market price, based on its -- commodity's display settings. showMarketPrice :: MarketPrice -> String showMarketPrice mp = unwords [ "P" , showDate (mpdate mp) , T.unpack (mpcommodity mp) , (showAmount . setAmountPrecision maxprecision) (mpamount mp) ] tests_Hledger_Data_MarketPrice = TestList [] hledger-lib-1.2/Hledger/Data/Period.hs0000644000000000000000000002756013035510426015732 0ustar0000000000000000{-| Manipulate the time periods typically used for reports with Period, a richer abstraction than DateSpan. See also Types and Dates. -} module Hledger.Data.Period where import Data.Time.Calendar import Data.Time.Calendar.MonthDay import Data.Time.Calendar.OrdinalDate import Data.Time.Calendar.WeekDate import Data.Time.Format import Text.Printf import Hledger.Data.Types -- | Convert Periods to DateSpans. -- -- >>> periodAsDateSpan (MonthPeriod 2000 1) == DateSpan (Just $ fromGregorian 2000 1 1) (Just $ fromGregorian 2000 2 1) -- True periodAsDateSpan :: Period -> DateSpan periodAsDateSpan (DayPeriod d) = DateSpan (Just d) (Just $ addDays 1 d) periodAsDateSpan (WeekPeriod b) = DateSpan (Just b) (Just $ addDays 7 b) periodAsDateSpan (MonthPeriod y m) = DateSpan (Just $ fromGregorian y m 1) (Just $ fromGregorian y' m' 1) where (y',m') | m==12 = (y+1,1) | otherwise = (y,m+1) periodAsDateSpan (QuarterPeriod y q) = DateSpan (Just $ fromGregorian y m 1) (Just $ fromGregorian y' m' 1) where (y', q') | q==4 = (y+1,1) | otherwise = (y,q+1) quarterAsMonth q = (q-1) * 3 + 1 m = quarterAsMonth q m' = quarterAsMonth q' periodAsDateSpan (YearPeriod y) = DateSpan (Just $ fromGregorian y 1 1) (Just $ fromGregorian (y+1) 1 1) periodAsDateSpan (PeriodBetween b e) = DateSpan (Just b) (Just e) periodAsDateSpan (PeriodFrom b) = DateSpan (Just b) Nothing periodAsDateSpan (PeriodTo e) = DateSpan Nothing (Just e) periodAsDateSpan (PeriodAll) = DateSpan Nothing Nothing -- | Convert DateSpans to Periods. -- -- >>> dateSpanAsPeriod $ DateSpan (Just $ fromGregorian 2000 1 1) (Just $ fromGregorian 2000 2 1) -- MonthPeriod 2000 1 dateSpanAsPeriod :: DateSpan -> Period dateSpanAsPeriod (DateSpan (Just b) (Just e)) = simplifyPeriod $ PeriodBetween b e dateSpanAsPeriod (DateSpan (Just b) Nothing) = PeriodFrom b dateSpanAsPeriod (DateSpan Nothing (Just e)) = PeriodTo e dateSpanAsPeriod (DateSpan Nothing Nothing) = PeriodAll -- | Convert PeriodBetweens to a more abstract period where possible. 
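-- A span that doesn't correspond to any single standard period is returned
-- unchanged. For example (an added illustration; output inferred from the
-- cases below, since there is no half-year period):
--
-- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 1 1) (fromGregorian 2000 7 1)
-- PeriodBetween 2000-01-01 2000-07-01
--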
-- -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 1 1 1) (fromGregorian 2 1 1) -- YearPeriod 1 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 10 1) (fromGregorian 2001 1 1) -- QuarterPeriod 2000 4 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 1) (fromGregorian 2000 3 1) -- MonthPeriod 2000 2 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2016 7 25) (fromGregorian 2016 8 1) -- WeekPeriod 2016-07-25 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 1 1) (fromGregorian 2000 1 2) -- DayPeriod 2000-01-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 28) (fromGregorian 2000 3 1) -- PeriodBetween 2000-02-28 2000-03-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 29) (fromGregorian 2000 3 1) -- DayPeriod 2000-02-29 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 12 31) (fromGregorian 2001 1 1) -- DayPeriod 2000-12-31 -- simplifyPeriod :: Period -> Period simplifyPeriod (PeriodBetween b e) = case (toGregorian b, toGregorian e) of -- a year ((by,1,1), (ey,1,1)) | by+1==ey -> YearPeriod by -- a half-year -- ((by,1,1), (ey,7,1)) | by==ey -> -- ((by,7,1), (ey,1,1)) | by+1==ey -> -- a quarter ((by,1,1), (ey,4,1)) | by==ey -> QuarterPeriod by 1 ((by,4,1), (ey,7,1)) | by==ey -> QuarterPeriod by 2 ((by,7,1), (ey,10,1)) | by==ey -> QuarterPeriod by 3 ((by,10,1), (ey,1,1)) | by+1==ey -> QuarterPeriod by 4 -- a month ((by,bm,1), (ey,em,1)) | by==ey && bm+1==em -> MonthPeriod by bm ((by,12,1), (ey,1,1)) | by+1==ey -> MonthPeriod by 12 -- a week (two successive mondays), -- YYYYwN ("week N of year YYYY") -- _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate from, toWeekDate to) in by==ey && fw+1==tw && bd==1 && ed==1 -> -- a week starting on a monday _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate b, toWeekDate (addDays (-1) e)) in by==ey && bw==ew && bd==1 && ed==7 -> WeekPeriod b -- a day ((by,bm,bd), (ey,em,ed)) | (by==ey && bm==em && bd+1==ed) || (by+1==ey && bm==12 && em==1 && bd==31 && ed==1) || -- crossing a year boundary (by==ey && bm+1==em && isLastDayOfMonth by bm bd && ed==1) -- crossing a month boundary -> DayPeriod b _ -> PeriodBetween b e simplifyPeriod p = p isLastDayOfMonth y m d = case m of 1 -> d==31 2 | isLeapYear y -> d==29 | otherwise -> d==28 3 -> d==31 4 -> d==30 5 -> d==31 6 -> d==30 7 -> d==31 8 -> d==31 9 -> d==30 10 -> d==31 11 -> d==30 12 -> d==31 _ -> False -- | Is this period a "standard" period, referencing a particular day, week, month, quarter, or year ? -- Periods of other durations, or infinite duration, or not starting on a standard period boundary, are not. isStandardPeriod = isStandardPeriod' . simplifyPeriod where isStandardPeriod' (DayPeriod _) = True isStandardPeriod' (WeekPeriod _) = True isStandardPeriod' (MonthPeriod _ _) = True isStandardPeriod' (QuarterPeriod _ _) = True isStandardPeriod' (YearPeriod _) = True isStandardPeriod' _ = False -- | Render a period as a compact display string suitable for user output. 
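-- Additional examples, inferred from the format strings below (not
-- doctest-verified):
--
-- >>> showPeriod (DayPeriod (fromGregorian 2016 7 25))
-- "2016/07/25"
-- >>> showPeriod (MonthPeriod 2016 7)
-- "2016/07"
-- >>> showPeriod (QuarterPeriod 2016 3)
-- "2016q3"
-- >>> showPeriod (YearPeriod 2016)
-- "2016"
--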
-- -- >>> showPeriod (WeekPeriod (fromGregorian 2016 7 25)) -- "2016/07/25w30" showPeriod (DayPeriod b) = formatTime defaultTimeLocale "%0C%y/%m/%d" b -- DATE showPeriod (WeekPeriod b) = formatTime defaultTimeLocale "%0C%y/%m/%dw%V" b -- STARTDATEwYEARWEEK showPeriod (MonthPeriod y m) = printf "%04d/%02d" y m -- YYYY/MM showPeriod (QuarterPeriod y q) = printf "%04dq%d" y q -- YYYYqN showPeriod (YearPeriod y) = printf "%04d" y -- YYYY showPeriod (PeriodBetween b e) = formatTime defaultTimeLocale "%0C%y/%m/%d" b ++ formatTime defaultTimeLocale "-%0C%y/%m/%d" (addDays (-1) e) -- STARTDATE-INCLUSIVEENDDATE showPeriod (PeriodFrom b) = formatTime defaultTimeLocale "%0C%y/%m/%d-" b -- STARTDATE- showPeriod (PeriodTo e) = formatTime defaultTimeLocale "-%0C%y/%m/%d" (addDays (-1) e) -- -INCLUSIVEENDDATE showPeriod PeriodAll = "-" periodStart :: Period -> Maybe Day periodStart p = mb where DateSpan mb _ = periodAsDateSpan p periodEnd :: Period -> Maybe Day periodEnd p = me where DateSpan _ me = periodAsDateSpan p -- | Move a standard period to the following period of same duration. -- Non-standard periods are unaffected. periodNext :: Period -> Period periodNext (DayPeriod b) = DayPeriod (addDays 1 b) periodNext (WeekPeriod b) = WeekPeriod (addDays 7 b) periodNext (MonthPeriod y 12) = MonthPeriod (y+1) 1 periodNext (MonthPeriod y m) = MonthPeriod y (m+1) periodNext (QuarterPeriod y 4) = QuarterPeriod (y+1) 1 periodNext (QuarterPeriod y q) = QuarterPeriod y (q+1) periodNext (YearPeriod y) = YearPeriod (y+1) periodNext p = p -- | Move a standard period to the preceding period of same duration. -- Non-standard periods are unaffected. periodPrevious :: Period -> Period periodPrevious (DayPeriod b) = DayPeriod (addDays (-1) b) periodPrevious (WeekPeriod b) = WeekPeriod (addDays (-7) b) periodPrevious (MonthPeriod y 1) = MonthPeriod (y-1) 12 periodPrevious (MonthPeriod y m) = MonthPeriod y (m-1) periodPrevious (QuarterPeriod y 1) = QuarterPeriod (y-1) 4 periodPrevious (QuarterPeriod y q) = QuarterPeriod y (q-1) periodPrevious (YearPeriod y) = YearPeriod (y-1) periodPrevious p = p -- | Move a standard period to the following period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodNextIn :: DateSpan -> Period -> Period periodNextIn (DateSpan _ (Just e)) p = case mb of Just b -> if b < e then p' else p _ -> p where p' = periodNext p mb = periodStart p' periodNextIn _ p = periodNext p -- | Move a standard period to the preceding period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodPreviousIn :: DateSpan -> Period -> Period periodPreviousIn (DateSpan (Just b) _) p = case me of Just e -> if e > b then p' else p _ -> p where p' = periodPrevious p me = periodEnd p' periodPreviousIn _ p = periodPrevious p -- | Move a standard period stepwise so that it encloses the given date. -- Non-standard periods are unaffected. periodMoveTo :: Day -> Period -> Period periodMoveTo d (DayPeriod _) = DayPeriod d periodMoveTo d (WeekPeriod _) = WeekPeriod $ mondayBefore d periodMoveTo d (MonthPeriod _ _) = MonthPeriod y m where (y,m,_) = toGregorian d periodMoveTo d (QuarterPeriod _ _) = QuarterPeriod y q where (y,m,_) = toGregorian d q = quarterContainingMonth m periodMoveTo d (YearPeriod _) = YearPeriod y where (y,_,_) = toGregorian d periodMoveTo _ p = p -- | Enlarge a standard period to the next larger enclosing standard period, if there is one. -- Eg, a day becomes the enclosing week. 
-- A week becomes whichever month the week's thursday falls into. -- A year becomes all (unlimited). -- Non-standard periods (arbitrary dates, or open-ended) are unaffected. periodGrow :: Period -> Period periodGrow (DayPeriod b) = WeekPeriod $ mondayBefore b periodGrow (WeekPeriod b) = MonthPeriod y m where (y,m) = yearMonthContainingWeekStarting b periodGrow (MonthPeriod y m) = QuarterPeriod y (quarterContainingMonth m) periodGrow (QuarterPeriod y _) = YearPeriod y periodGrow (YearPeriod _) = PeriodAll periodGrow p = p -- | Shrink a period to the next smaller standard period inside it, -- choosing the subperiod which contains today's date if possible, -- otherwise the first subperiod. It goes like this: -- unbounded periods and nonstandard periods (between two arbitrary dates) -> -- current year -> -- current quarter if it's in selected year, otherwise first quarter of selected year -> -- current month if it's in selected quarter, otherwise first month of selected quarter -> -- current week if it's in selected month, otherwise first week of selected month -> -- today if it's in selected week, otherwise first day of selected week, -- unless that's in previous month, in which case first day of month containing selected week. -- Shrinking a day has no effect. periodShrink :: Day -> Period -> Period periodShrink _ p@(DayPeriod _) = p periodShrink today (WeekPeriod b) | today >= b && diffDays today b < 7 = DayPeriod today | m /= weekmonth = DayPeriod $ fromGregorian weekyear weekmonth 1 | otherwise = DayPeriod b where (_,m,_) = toGregorian b (weekyear,weekmonth) = yearMonthContainingWeekStarting b periodShrink today (MonthPeriod y m) | (y',m') == (y,m) = WeekPeriod $ mondayBefore today | otherwise = WeekPeriod $ startOfFirstWeekInMonth y m where (y',m',_) = toGregorian today periodShrink today (QuarterPeriod y q) | quarterContainingMonth thismonth == q = MonthPeriod y thismonth | otherwise = MonthPeriod y (firstMonthOfQuarter q) where (_,thismonth,_) = toGregorian today periodShrink today (YearPeriod y) | y == thisyear = QuarterPeriod y thisquarter | otherwise = QuarterPeriod y 1 where (thisyear,thismonth,_) = toGregorian today thisquarter = quarterContainingMonth thismonth periodShrink today _ = YearPeriod y where (y,_,_) = toGregorian today mondayBefore d = addDays (fromIntegral (1 - wd)) d where (_,_,wd) = toWeekDate d yearMonthContainingWeekStarting weekstart = (y,m) where thu = addDays 3 weekstart (y,yd) = toOrdinalDate thu (m,_) = dayOfYearToMonthAndDay (isLeapYear y) yd quarterContainingMonth m = (m-1) `div` 3 + 1 firstMonthOfQuarter q = (q-1)*3 + 1 startOfFirstWeekInMonth y m | monthstartday <= 4 = mon | otherwise = addDays 7 mon -- month starts with a fri/sat/sun where monthstart = fromGregorian y m 1 mon = mondayBefore monthstart (_,_,monthstartday) = toWeekDate monthstart hledger-lib-1.2/Hledger/Data/Posting.hs0000644000000000000000000003000113066173044016120 0ustar0000000000000000{-| A 'Posting' represents a change (by some 'MixedAmount') of the balance in some 'Account'. Each 'Transaction' contains two or more postings which should add up to 0. Postings reference their parent transaction, so we can look up the date or description there. 
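For example (an illustrative sketch, using 'post' and 'sumPostings' from this
module and 'usd' from Hledger.Data.Amount):

> "assets:bank:checking" `post` usd 1                 -- a $1 posting
> sumPostings ["expenses:food" `post` usd 5
>             ,"assets:cash"   `post` usd (-5)]       -- sums to zero
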
-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Posting ( -- * Posting nullposting, posting, post, -- * operations originalPosting, postingStatus, isReal, isVirtual, isBalancedVirtual, isEmptyPosting, isAssignment, hasAmount, postingAllTags, transactionAllTags, postingAllImplicitTags, relatedPostings, removePrices, -- * date operations postingDate, postingDate2, isPostingInDateSpan, isPostingInDateSpan', postingsDateSpan, postingsDateSpan', -- * account name operations accountNamesFromPostings, accountNamePostingType, accountNameWithoutPostingType, accountNameWithPostingType, joinAccountNames, concatAccountNames, accountNameApplyAliases, accountNameApplyAliasesMemo, -- * transaction description operations transactionPayee, transactionNote, payeeAndNoteFromDescription, -- * arithmetic sumPostings, -- * rendering showPosting, -- * misc. showComment, tests_Hledger_Data_Posting ) where import Data.List import Data.Maybe import Data.MemoUgly (memo) import Data.Monoid import Data.Ord import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe import Test.HUnit import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Amount import Hledger.Data.AccountName import Hledger.Data.Dates (nulldate, spanContainsDate) instance Show Posting where show = showPosting nullposting, posting :: Posting nullposting = Posting {pdate=Nothing ,pdate2=Nothing ,pstatus=Uncleared ,paccount="" ,pamount=nullmixedamt ,pcomment="" ,ptype=RegularPosting ,ptags=[] ,pbalanceassertion=Nothing ,ptransaction=Nothing ,porigin=Nothing } posting = nullposting post :: AccountName -> Amount -> Posting post acct amt = posting {paccount=acct, pamount=Mixed [amt]} originalPosting :: Posting -> Posting originalPosting p = fromMaybe p $ porigin p -- XXX once rendered user output, but just for debugging now; clean up showPosting :: Posting -> String showPosting p@Posting{paccount=a,pamount=amt,ptype=t} = unlines $ [concatTopPadded [show (postingDate p) ++ " ", showaccountname a ++ " ", showamount amt, showComment (pcomment p)]] where ledger3ishlayout = False acctnamewidth = if ledger3ishlayout then 25 else 22 showaccountname = fitString (Just acctnamewidth) Nothing False False . bracket . T.unpack . elideAccountName width (bracket,width) = case t of BalancedVirtualPosting -> (\s -> "["++s++"]", acctnamewidth-2) VirtualPosting -> (\s -> "("++s++")", acctnamewidth-2) _ -> (id,acctnamewidth) showamount = padLeftWide 12 . showMixedAmount showComment :: Text -> String showComment t = if T.null t then "" else " ;" ++ T.unpack t isReal :: Posting -> Bool isReal p = ptype p == RegularPosting isVirtual :: Posting -> Bool isVirtual p = ptype p == VirtualPosting isBalancedVirtual :: Posting -> Bool isBalancedVirtual p = ptype p == BalancedVirtualPosting hasAmount :: Posting -> Bool hasAmount = (/= missingmixedamt) . pamount isAssignment :: Posting -> Bool isAssignment p = not (hasAmount p) && isJust (pbalanceassertion p) accountNamesFromPostings :: [Posting] -> [AccountName] accountNamesFromPostings = nub . map paccount sumPostings :: [Posting] -> MixedAmount sumPostings = sumStrict . map pamount -- | Remove all prices of a posting removePrices :: Posting -> Posting removePrices p = p{ pamount = Mixed $ remove <$> amounts (pamount p) } where remove a = a { aprice = NoPrice } -- | Get a posting's (primary) date - it's own primary date if specified, -- otherwise the parent transaction's primary date, or the null date if -- there is no parent transaction. 
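--
-- A rough sketch of the fallback behaviour (dates are illustrative; not a verified doctest):
--
-- @
-- postingDate nullposting{pdate = Just (fromGregorian 2017 1 1)}   -- its own date wins: 2017-01-01
-- postingDate nullposting                                          -- no date, no parent transaction: nulldate
-- @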
postingDate :: Posting -> Day postingDate p = fromMaybe txndate $ pdate p where txndate = maybe nulldate tdate $ ptransaction p -- | Get a posting's secondary (secondary) date, which is the first of: -- posting's secondary date, transaction's secondary date, posting's -- primary date, transaction's primary date, or the null date if there is -- no parent transaction. postingDate2 :: Posting -> Day postingDate2 p = headDef nulldate $ catMaybes dates where dates = [pdate2 p ,maybe Nothing tdate2 $ ptransaction p ,pdate p ,maybe Nothing (Just . tdate) $ ptransaction p ] -- | Get a posting's cleared status: cleared or pending if those are -- explicitly set, otherwise the cleared status of its parent -- transaction, or uncleared if there is no parent transaction. (Note -- Uncleared's ambiguity, it can mean "uncleared" or "don't know". postingStatus :: Posting -> ClearedStatus postingStatus Posting{pstatus=s, ptransaction=mt} | s == Uncleared = case mt of Just t -> tstatus t Nothing -> Uncleared | otherwise = s -- | Implicit tags for this transaction. transactionImplicitTags :: Transaction -> [Tag] transactionImplicitTags t = filter (not . T.null . snd) [("code", tcode t) ,("description", tdescription t) ,("payee", transactionPayee t) ,("note", transactionNote t) ] transactionPayee :: Transaction -> Text transactionPayee = fst . payeeAndNoteFromDescription . tdescription transactionNote :: Transaction -> Text transactionNote = fst . payeeAndNoteFromDescription . tdescription -- | Parse a transaction's description into payee and note (aka narration) fields, -- assuming a convention of separating these with | (like Beancount). -- Ie, everything up to the first | is the payee, everything after it is the note. -- When there's no |, payee == note == description. payeeAndNoteFromDescription :: Text -> (Text,Text) payeeAndNoteFromDescription t = (textstrip p, textstrip $ T.tail n) where (p,n) = T.breakOn "|" t -- | Tags for this posting including implicit and any inherited from its parent transaction. postingAllImplicitTags :: Posting -> [Tag] postingAllImplicitTags p = ptags p ++ maybe [] transactionTags (ptransaction p) where transactionTags t = ttags t ++ transactionImplicitTags t -- | Tags for this posting including any inherited from its parent transaction. postingAllTags :: Posting -> [Tag] postingAllTags p = ptags p ++ maybe [] ttags (ptransaction p) -- | Tags for this transaction including any from its postings. transactionAllTags :: Transaction -> [Tag] transactionAllTags t = ttags t ++ concatMap ptags (tpostings t) -- Get the other postings from this posting's transaction. relatedPostings :: Posting -> [Posting] relatedPostings p@Posting{ptransaction=Just t} = filter (/= p) $ tpostings t relatedPostings _ = [] -- | Does this posting fall within the given date span ? isPostingInDateSpan :: DateSpan -> Posting -> Bool isPostingInDateSpan s = spanContainsDate s . postingDate -- --date2-sensitive version, separate for now to avoid disturbing multiBalanceReport. isPostingInDateSpan' :: WhichDate -> DateSpan -> Posting -> Bool isPostingInDateSpan' PrimaryDate s = spanContainsDate s . postingDate isPostingInDateSpan' SecondaryDate s = spanContainsDate s . postingDate2 isEmptyPosting :: Posting -> Bool isEmptyPosting = isZeroMixedAmount . pamount -- | Get the minimal date span which contains all the postings, or the -- null date span if there are none. 
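-- Note the end of the span is exclusive (one day after the latest posting date).
-- A rough sketch with hypothetical postings p1 (dated 2017/01/01) and p2 (dated 2017/01/03):
--
-- @
-- postingsDateSpan [p1, p2]   -- DateSpan (Just 2017-01-01) (Just 2017-01-04)
-- @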
postingsDateSpan :: [Posting] -> DateSpan postingsDateSpan [] = DateSpan Nothing Nothing postingsDateSpan ps = DateSpan (Just $ postingDate $ head ps') (Just $ addDays 1 $ postingDate $ last ps') where ps' = sortBy (comparing postingDate) ps -- --date2-sensitive version, as above. postingsDateSpan' :: WhichDate -> [Posting] -> DateSpan postingsDateSpan' _ [] = DateSpan Nothing Nothing postingsDateSpan' wd ps = DateSpan (Just $ postingdate $ head ps') (Just $ addDays 1 $ postingdate $ last ps') where ps' = sortBy (comparing postingdate) ps postingdate = if wd == PrimaryDate then postingDate else postingDate2 -- AccountName stuff that depends on PostingType accountNamePostingType :: AccountName -> PostingType accountNamePostingType a | T.null a = RegularPosting | T.head a == '[' && T.last a == ']' = BalancedVirtualPosting | T.head a == '(' && T.last a == ')' = VirtualPosting | otherwise = RegularPosting accountNameWithoutPostingType :: AccountName -> AccountName accountNameWithoutPostingType a = case accountNamePostingType a of BalancedVirtualPosting -> T.init $ T.tail a VirtualPosting -> T.init $ T.tail a RegularPosting -> a accountNameWithPostingType :: PostingType -> AccountName -> AccountName accountNameWithPostingType BalancedVirtualPosting a = "["<>accountNameWithoutPostingType a<>"]" accountNameWithPostingType VirtualPosting a = "("<>accountNameWithoutPostingType a<>")" accountNameWithPostingType RegularPosting a = accountNameWithoutPostingType a -- | Prefix one account name to another, preserving posting type -- indicators like concatAccountNames. joinAccountNames :: AccountName -> AccountName -> AccountName joinAccountNames a b = concatAccountNames $ filter (not . T.null) [a,b] -- | Join account names into one. If any of them has () or [] posting type -- indicators, these (the first type encountered) will also be applied to -- the resulting account name. concatAccountNames :: [AccountName] -> AccountName concatAccountNames as = accountNameWithPostingType t $ T.intercalate ":" $ map accountNameWithoutPostingType as where t = headDef RegularPosting $ filter (/= RegularPosting) $ map accountNamePostingType as -- | Rewrite an account name using all matching aliases from the given list, in sequence. -- Each alias sees the result of applying the previous aliases. accountNameApplyAliases :: [AccountAlias] -> AccountName -> AccountName accountNameApplyAliases aliases a = accountNameWithPostingType atype aname' where (aname,atype) = (accountNameWithoutPostingType a, accountNamePostingType a) aname' = foldl (\acct alias -> dbg6 "result" $ aliasReplace (dbg6 "alias" alias) (dbg6 "account" acct)) aname aliases -- | Memoising version of accountNameApplyAliases, maybe overkill. 
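--
-- A rough sketch of alias rewriting (the alias and account names are made up):
--
-- @
-- accountNameApplyAliasesMemo [BasicAlias "checking" "assets:bank:checking"] "checking:joint"
--   -- "assets:bank:checking:joint"
-- @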
accountNameApplyAliasesMemo :: [AccountAlias] -> AccountName -> AccountName accountNameApplyAliasesMemo aliases = memo (accountNameApplyAliases aliases) -- aliasMatches :: AccountAlias -> AccountName -> Bool -- aliasMatches (BasicAlias old _) a = old `isAccountNamePrefixOf` a -- aliasMatches (RegexAlias re _) a = regexMatchesCI re a aliasReplace :: AccountAlias -> AccountName -> AccountName aliasReplace (BasicAlias old new) a | old `isAccountNamePrefixOf` a || old == a = new <> T.drop (T.length old) a | otherwise = a aliasReplace (RegexAlias re repl) a = T.pack $ regexReplaceCIMemo re repl $ T.unpack a -- XXX tests_Hledger_Data_Posting = TestList [ "accountNamePostingType" ~: do accountNamePostingType "a" `is` RegularPosting accountNamePostingType "(a)" `is` VirtualPosting accountNamePostingType "[a]" `is` BalancedVirtualPosting ,"accountNameWithoutPostingType" ~: do accountNameWithoutPostingType "(a)" `is` "a" ,"accountNameWithPostingType" ~: do accountNameWithPostingType VirtualPosting "[a]" `is` "(a)" ,"joinAccountNames" ~: do "a" `joinAccountNames` "b:c" `is` "a:b:c" "a" `joinAccountNames` "(b:c)" `is` "(a:b:c)" "[a]" `joinAccountNames` "(b:c)" `is` "[a:b:c]" "" `joinAccountNames` "a" `is` "a" ,"concatAccountNames" ~: do concatAccountNames [] `is` "" concatAccountNames ["a","(b)","[c:d]"] `is` "(a:b:c:d)" ] hledger-lib-1.2/Hledger/Data/RawOptions.hs0000644000000000000000000000324613066746043016622 0ustar0000000000000000{-| hledger's cmdargs modes parse command-line arguments to an intermediate format, RawOpts (an association list), rather than a fixed ADT like CliOpts. This allows the modes and flags to be reused more easily by hledger commands/scripts in this and other packages. -} module Hledger.Data.RawOptions ( RawOpts, setopt, setboolopt, inRawOpts, boolopt, stringopt, maybestringopt, listofstringopt, intopt, maybeintopt ) where import Data.Maybe import qualified Data.Text as T import Safe import Hledger.Utils -- | The result of running cmdargs: an association list of option names to string values. type RawOpts = [(String,String)] setopt :: String -> String -> RawOpts -> RawOpts setopt name val = (++ [(name, quoteIfNeeded $ val)]) setboolopt :: String -> RawOpts -> RawOpts setboolopt name = (++ [(name,"")]) -- | Is the named option present ? inRawOpts :: String -> RawOpts -> Bool inRawOpts name = isJust . lookup name boolopt :: String -> RawOpts -> Bool boolopt = inRawOpts maybestringopt :: String -> RawOpts -> Maybe String maybestringopt name = maybe Nothing (Just . T.unpack . stripquotes . T.pack) . lookup name . reverse stringopt :: String -> RawOpts -> String stringopt name = fromMaybe "" . maybestringopt name listofstringopt :: String -> RawOpts -> [String] listofstringopt name rawopts = [v | (k,v) <- rawopts, k==name] maybeintopt :: String -> RawOpts -> Maybe Int maybeintopt name rawopts = let ms = maybestringopt name rawopts in case ms of Nothing -> Nothing Just s -> Just $ readDef (usageError $ "could not parse "++name++" number: "++s) s intopt :: String -> RawOpts -> Int intopt name = fromMaybe 0 . maybeintopt name hledger-lib-1.2/Hledger/Data/StringFormat.hs0000644000000000000000000002131313035210046017110 0ustar0000000000000000-- | Parse format strings provided by --format, with awareness of -- hledger's report item fields. The formats are used by -- report-specific renderers like renderBalanceReportItem. {-# LANGUAGE FlexibleContexts, TypeFamilies #-} module Hledger.Data.StringFormat ( parseStringFormat , defaultStringFormatStyle , StringFormat(..) 
, StringFormatComponent(..) , ReportItemField(..) , tests ) where import Prelude () import Prelude.Compat import Numeric import Data.Char (isPrint) import Data.Maybe import Test.HUnit import Text.Megaparsec import Text.Megaparsec.String import Hledger.Utils.String (formatString) -- | A format specification/template to use when rendering a report line item as text. -- -- A format is a sequence of components; each is either a literal -- string, or a hledger report item field with specified width and -- justification whose value will be interpolated at render time. -- -- A component's value may be a multi-line string (or a -- multi-commodity amount), in which case the final string will be -- either single-line or a top or bottom-aligned multi-line string -- depending on the StringFormat variant used. -- -- Currently this is only used in the balance command's single-column -- mode, which provides a limited StringFormat renderer. -- data StringFormat = OneLine [StringFormatComponent] -- ^ multi-line values will be rendered on one line, comma-separated | TopAligned [StringFormatComponent] -- ^ values will be top-aligned (and bottom-padded to the same height) | BottomAligned [StringFormatComponent] -- ^ values will be bottom-aligned (and top-padded) deriving (Show, Eq) data StringFormatComponent = FormatLiteral String -- ^ Literal text to be rendered as-is | FormatField Bool (Maybe Int) (Maybe Int) ReportItemField -- ^ A data field to be formatted and interpolated. Parameters: -- -- - Left justify ? Right justified if false -- - Minimum width ? Will be space-padded if narrower than this -- - Maximum width ? Will be clipped if wider than this -- - Which of the standard hledger report item fields to interpolate deriving (Show, Eq) -- | An id identifying which report item field to interpolate. These -- are drawn from several hledger report types, so are not all -- applicable for a given report. data ReportItemField = AccountField -- ^ A posting or balance report item's account name | DefaultDateField -- ^ A posting or register or entry report item's date | DescriptionField -- ^ A posting or register or entry report item's description | TotalField -- ^ A balance or posting report item's balance or running total. -- Always rendered right-justified. | DepthSpacerField -- ^ A balance report item's indent level (which may be different from the account name depth). -- Rendered as this number of spaces, multiplied by the minimum width spec if any. | FieldNo Int -- ^ A report item's nth field. May be unimplemented. deriving (Show, Eq) ---------------------------------------------------------------------- -- renderStringFormat :: StringFormat -> Map String String -> String -- renderStringFormat fmt params = ---------------------------------------------------------------------- -- | Parse a string format specification, or return a parse error. 
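--
-- A rough sketch of a successful parse (mirroring the parser tests below):
--
-- @
-- parseStringFormat "%20(account) %.10(total)"
--   -- Right (BottomAligned [ FormatField False (Just 20) Nothing AccountField
--   --                      , FormatLiteral " "
--   --                      , FormatField False Nothing (Just 10) TotalField ])
-- @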
parseStringFormat :: String -> Either String StringFormat parseStringFormat input = case (runParser (stringformatp <* eof) "(unknown)") input of Left y -> Left $ show y Right x -> Right x defaultStringFormatStyle = BottomAligned stringformatp :: Parser StringFormat stringformatp = do alignspec <- optional (try $ char '%' >> oneOf "^_,") let constructor = case alignspec of Just '^' -> TopAligned Just '_' -> BottomAligned Just ',' -> OneLine _ -> defaultStringFormatStyle constructor <$> many componentp componentp :: Parser StringFormatComponent componentp = formatliteralp <|> formatfieldp formatliteralp :: Parser StringFormatComponent formatliteralp = do s <- some c return $ FormatLiteral s where isPrintableButNotPercentage x = isPrint x && (not $ x == '%') c = (satisfy isPrintableButNotPercentage "printable character") <|> try (string "%%" >> return '%') formatfieldp :: Parser StringFormatComponent formatfieldp = do char '%' leftJustified <- optional (char '-') minWidth <- optional (some $ digitChar) maxWidth <- optional (do char '.'; some $ digitChar) -- TODO: Can this be (char '1') *> (some digitChar) char '(' f <- fieldp char ')' return $ FormatField (isJust leftJustified) (parseDec minWidth) (parseDec maxWidth) f where parseDec s = case s of Just text -> Just m where ((m,_):_) = readDec text _ -> Nothing fieldp :: Parser ReportItemField fieldp = do try (string "account" >> return AccountField) <|> try (string "depth_spacer" >> return DepthSpacerField) <|> try (string "date" >> return DescriptionField) <|> try (string "description" >> return DescriptionField) <|> try (string "total" >> return TotalField) <|> try (some digitChar >>= (\s -> return $ FieldNo $ read s)) ---------------------------------------------------------------------- testFormat :: StringFormatComponent -> String -> String -> Assertion testFormat fs value expected = assertEqual name expected actual where (name, actual) = case fs of FormatLiteral l -> ("literal", formatString False Nothing Nothing l) FormatField leftJustify min max _ -> ("field", formatString leftJustify min max value) testParser :: String -> StringFormat -> Assertion testParser s expected = case (parseStringFormat s) of Left error -> assertFailure $ show error Right actual -> assertEqual ("Input: " ++ s) expected actual tests = test [ formattingTests ++ parserTests ] formattingTests = [ testFormat (FormatLiteral " ") "" " " , testFormat (FormatField False Nothing Nothing DescriptionField) "description" "description" , testFormat (FormatField False (Just 20) Nothing DescriptionField) "description" " description" , testFormat (FormatField False Nothing (Just 20) DescriptionField) "description" "description" , testFormat (FormatField True Nothing (Just 20) DescriptionField) "description" "description" , testFormat (FormatField True (Just 20) Nothing DescriptionField) "description" "description " , testFormat (FormatField True (Just 20) (Just 20) DescriptionField) "description" "description " , testFormat (FormatField True Nothing (Just 3) DescriptionField) "description" "des" ] parserTests = [ testParser "" (defaultStringFormatStyle []) , testParser "D" (defaultStringFormatStyle [FormatLiteral "D"]) , testParser "%(date)" (defaultStringFormatStyle [FormatField False Nothing Nothing DescriptionField]) , testParser "%(total)" (defaultStringFormatStyle [FormatField False Nothing Nothing TotalField]) , testParser "^%(total)" (TopAligned [FormatField False Nothing Nothing TotalField]) , testParser "_%(total)" (BottomAligned [FormatField False Nothing Nothing 
TotalField]) , testParser ",%(total)" (OneLine [FormatField False Nothing Nothing TotalField]) , testParser "Hello %(date)!" (defaultStringFormatStyle [FormatLiteral "Hello ", FormatField False Nothing Nothing DescriptionField, FormatLiteral "!"]) , testParser "%-(date)" (defaultStringFormatStyle [FormatField True Nothing Nothing DescriptionField]) , testParser "%20(date)" (defaultStringFormatStyle [FormatField False (Just 20) Nothing DescriptionField]) , testParser "%.10(date)" (defaultStringFormatStyle [FormatField False Nothing (Just 10) DescriptionField]) , testParser "%20.10(date)" (defaultStringFormatStyle [FormatField False (Just 20) (Just 10) DescriptionField]) , testParser "%20(account) %.10(total)\n" (defaultStringFormatStyle [FormatField False (Just 20) Nothing AccountField , FormatLiteral " " , FormatField False Nothing (Just 10) TotalField , FormatLiteral "\n" ]) ] hledger-lib-1.2/Hledger/Data/Timeclock.hs0000644000000000000000000001350213035210046016404 0ustar0000000000000000{-| A 'TimeclockEntry' is a clock-in, clock-out, or other directive in a timeclock file (see timeclock.el or the command-line version). These can be converted to 'Transactions' and queried like a ledger. -} {-# LANGUAGE CPP, OverloadedStrings #-} module Hledger.Data.Timeclock where import Data.Maybe -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.Clock import Data.Time.Format import Data.Time.LocalTime #if !(MIN_VERSION_time(1,5,0)) import System.Locale (defaultTimeLocale) #endif import Test.HUnit import Text.Printf import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Posting import Hledger.Data.Transaction instance Show TimeclockEntry where show t = printf "%s %s %s %s" (show $ tlcode t) (show $ tldatetime t) (tlaccount t) (tldescription t) instance Show TimeclockCode where show SetBalance = "b" show SetRequiredHours = "h" show In = "i" show Out = "o" show FinalOut = "O" instance Read TimeclockCode where readsPrec _ ('b' : xs) = [(SetBalance, xs)] readsPrec _ ('h' : xs) = [(SetRequiredHours, xs)] readsPrec _ ('i' : xs) = [(In, xs)] readsPrec _ ('o' : xs) = [(Out, xs)] readsPrec _ ('O' : xs) = [(FinalOut, xs)] readsPrec _ _ = [] -- | Convert time log entries to journal transactions. When there is no -- clockout, add one with the provided current time. Sessions crossing -- midnight are split into days to give accurate per-day totals. 
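--
-- A rough sketch, mirroring the tests below (@now@, @nowstr@, @yesterday@ and
-- the account name are assumed local bindings, not part of this module):
--
-- @
-- let i = TimeclockEntry nullsourcepos In (LocalTime yesterday (TimeOfDay 23 0 0)) "acct" ""
-- in map (T.unpack . tdescription) (timeclockEntriesToTransactions now [i])
--   -- ["23:00-23:59", "00:00-" ++ nowstr]   -- the open session is split at midnight
-- @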
timeclockEntriesToTransactions :: LocalTime -> [TimeclockEntry] -> [Transaction] timeclockEntriesToTransactions _ [] = [] timeclockEntriesToTransactions now [i] | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now [i',o] | otherwise = [entryFromTimeclockInOut i o] where o = TimeclockEntry (tlsourcepos i) Out end "" "" end = if itime > now then itime else now (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} timeclockEntriesToTransactions now (i:o:rest) | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now (i':o:rest) | otherwise = entryFromTimeclockInOut i o : timeclockEntriesToTransactions now rest where (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} -- | Convert a timeclock clockin and clockout entry to an equivalent journal -- transaction, representing the time expenditure. Note this entry is not balanced, -- since we omit the \"assets:time\" transaction for simpler output. entryFromTimeclockInOut :: TimeclockEntry -> TimeclockEntry -> Transaction entryFromTimeclockInOut i o | otime >= itime = t | otherwise = error' $ "clock-out time less than clock-in time in:\n" ++ showTransaction t where t = Transaction { tindex = 0, tsourcepos = tlsourcepos i, tdate = idate, tdate2 = Nothing, tstatus = Cleared, tcode = "", tdescription = desc, tcomment = "", ttags = [], tpostings = ps, tpreceding_comment_lines="" } itime = tldatetime i otime = tldatetime o itod = localTimeOfDay itime otod = localTimeOfDay otime idate = localDay itime desc | T.null (tldescription i) = T.pack $ showtime itod ++ "-" ++ showtime otod | otherwise = tldescription i showtime = take 5 . show hours = elapsedSeconds (toutc otime) (toutc itime) / 3600 where toutc = localTimeToUTC utc acctname = tlaccount i amount = Mixed [hrs hours] ps = [posting{paccount=acctname, pamount=amount, ptype=VirtualPosting, ptransaction=Just t}] tests_Hledger_Data_Timeclock = TestList [ "timeclockEntriesToTransactions" ~: do today <- getCurrentDay now' <- getCurrentTime tz <- getCurrentTimeZone let now = utcToLocalTime tz now' nowstr = showtime now yesterday = prevday today clockin = TimeclockEntry nullsourcepos In mktime d = LocalTime d . fromMaybe midnight . #if MIN_VERSION_time(1,5,0) parseTimeM True defaultTimeLocale "%H:%M:%S" #else parseTime defaultTimeLocale "%H:%M:%S" #endif showtime = formatTime defaultTimeLocale "%H:%M" assertEntriesGiveStrings name es ss = assertEqual name ss (map (T.unpack . 
tdescription) $ timeclockEntriesToTransactions now es) assertEntriesGiveStrings "started yesterday, split session at midnight" [clockin (mktime yesterday "23:00:00") "" ""] ["23:00-23:59","00:00-"++nowstr] assertEntriesGiveStrings "split multi-day sessions at each midnight" [clockin (mktime (addDays (-2) today) "23:00:00") "" ""] ["23:00-23:59","00:00-23:59","00:00-"++nowstr] assertEntriesGiveStrings "auto-clock-out if needed" [clockin (mktime today "00:00:00") "" ""] ["00:00-"++nowstr] let future = utcToLocalTime tz $ addUTCTime 100 now' futurestr = showtime future assertEntriesGiveStrings "use the clockin time for auto-clockout if it's in the future" [clockin future "" ""] [printf "%s-%s" futurestr futurestr] ] hledger-lib-1.2/Hledger/Data/Transaction.hs0000644000000000000000000007435613066173044017007 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-| A 'Transaction' represents a movement of some commodity(ies) between two or more accounts. It consists of multiple account 'Posting's which balance to zero, a date, and optional extras like description, cleared status, and tags. -} {-# LANGUAGE OverloadedStrings, LambdaCase #-} module Hledger.Data.Transaction ( -- * Transaction nullsourcepos, nulltransaction, txnTieKnot, txnUntieKnot, -- * operations showAccountName, hasRealPostings, realPostings, assignmentPostings, virtualPostings, balancedVirtualPostings, transactionsPostings, isTransactionBalanced, -- nonzerobalanceerror, -- * date operations transactionDate2, -- * arithmetic transactionPostingBalances, balanceTransaction, balanceTransactionUpdate, -- * rendering showTransaction, showTransactionUnelided, showTransactionUnelidedOneLineAmounts, showPostingLine, showPostingLines, -- * GenericSourcePos sourceFilePath, sourceFirstLine, showGenericSourcePos, -- * misc. tests_Hledger_Data_Transaction ) where import Data.List import Control.Monad.Except import Control.Monad.Identity import Data.Maybe import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Test.HUnit import Text.Printf import qualified Data.Map as Map import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Amount instance Show Transaction where show = showTransactionUnelided instance Show ModifierTransaction where show t = "= " ++ T.unpack (mtvalueexpr t) ++ "\n" ++ unlines (map show (mtpostings t)) instance Show PeriodicTransaction where show t = "~ " ++ T.unpack (ptperiodicexpr t) ++ "\n" ++ unlines (map show (ptpostings t)) sourceFilePath :: GenericSourcePos -> FilePath sourceFilePath = \case GenericSourcePos fp _ _ -> fp JournalSourcePos fp _ -> fp sourceFirstLine :: GenericSourcePos -> Int sourceFirstLine = \case GenericSourcePos _ line _ -> line JournalSourcePos _ (line, _) -> line showGenericSourcePos :: GenericSourcePos -> String showGenericSourcePos = \case GenericSourcePos fp line column -> show fp ++ " (line " ++ show line ++ ", column " ++ show column ++ ")" JournalSourcePos fp (line, line') -> show fp ++ " (lines " ++ show line ++ "-" ++ show line' ++ ")" nullsourcepos :: GenericSourcePos nullsourcepos = GenericSourcePos "" 1 1 nulltransaction :: Transaction nulltransaction = Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=nulldate, tdate2=Nothing, tstatus=Uncleared, tcode="", tdescription="", tcomment="", ttags=[], tpostings=[], tpreceding_comment_lines="" } {-| Show a journal transaction, formatted for the print command. 
ledger 2.x's standard format looks like this: @ yyyy/mm/dd[ *][ CODE] description......... [ ; comment...............] account name 1..................... ...$amount1[ ; comment...............] account name 2..................... ..$-amount1[ ; comment...............] pcodewidth = no limit -- 10 -- mimicking ledger layout. pdescwidth = no limit -- 20 -- I don't remember what these mean, pacctwidth = 35 minimum, no maximum -- they were important at the time. pamtwidth = 11 pcommentwidth = no limit -- 22 @ -} showTransaction :: Transaction -> String showTransaction = showTransactionHelper True False showTransactionUnelided :: Transaction -> String showTransactionUnelided = showTransactionHelper False False tests_showTransactionUnelided = [ "showTransactionUnelided" ~: do let t `gives` s = assertEqual "" s (showTransactionUnelided t) nulltransaction `gives` "0000/01/01\n\n" nulltransaction{ tdate=parsedate "2012/05/14", tdate2=Just $ parsedate "2012/05/15", tstatus=Uncleared, tcode="code", tdescription="desc", tcomment="tcomment1\ntcomment2\n", ttags=[("ttag1","val1")], tpostings=[ nullposting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1, hrs 2], pcomment="\npcomment2\n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")] } ] } `gives` unlines [ "2012/05/14=2012/05/15 (code) desc ; tcomment1", " ; tcomment2", " * a $1.00", " ; pcomment2", " * a 2.00h", " ; pcomment2", "" ] ] showTransactionUnelidedOneLineAmounts :: Transaction -> String showTransactionUnelidedOneLineAmounts = showTransactionHelper False True -- cf showPosting showTransactionHelper :: Bool -> Bool -> Transaction -> String showTransactionHelper elide onelineamounts t = unlines $ [descriptionline] ++ newlinecomments ++ (postingsAsLines elide onelineamounts t (tpostings t)) ++ [""] where descriptionline = rstrip $ concat [date, status, code, desc, samelinecomment] date = showDate (tdate t) ++ maybe "" (("="++) . showDate) (tdate2 t) status | tstatus t == Cleared = " *" | tstatus t == Pending = " !" | otherwise = "" code = if T.length (tcode t) > 0 then printf " (%s)" $ T.unpack $ tcode t else "" desc = if null d then "" else " " ++ d where d = T.unpack $ tdescription t (samelinecomment, newlinecomments) = case renderCommentLines (tcomment t) of [] -> ("",[]) c:cs -> (c,cs) -- Render a transaction or posting's comment as indented, semicolon-prefixed comment lines. renderCommentLines :: Text -> [String] renderCommentLines t = case lines $ T.unpack t of ("":ls) -> "":map commentprefix ls ls -> map commentprefix ls where commentprefix = indent . ("; "++) -- -- Render a transaction or posting's comment as semicolon-prefixed comment lines - -- -- an inline (same-line) comment if it's a single line, otherwise multiple indented lines. -- commentLines' :: String -> (String, [String]) -- commentLines' s -- | null s = ("", []) -- | length ls == 1 = (prefix $ head ls, []) -- | otherwise = ("", (prefix $ head ls):(map prefix $ tail ls)) -- where -- ls = lines s -- prefix = indent . 
(";"++) postingsAsLines :: Bool -> Bool -> Transaction -> [Posting] -> [String] postingsAsLines elide onelineamounts t ps | elide && length ps > 1 && isTransactionBalanced Nothing t -- imprecise balanced check = (concatMap (postingAsLines False onelineamounts ps) $ init ps) ++ postingAsLines True onelineamounts ps (last ps) | otherwise = concatMap (postingAsLines False onelineamounts ps) ps postingAsLines :: Bool -> Bool -> [Posting] -> Posting -> [String] postingAsLines elideamount onelineamounts ps p = concat [ postingblock ++ newlinecomments | postingblock <- postingblocks] where postingblocks = [map rstrip $ lines $ concatTopPadded [account, " ", amount, assertion, samelinecomment] | amount <- shownAmounts] assertion = maybe "" ((" = " ++) . showAmountWithZeroCommodity) $ pbalanceassertion p account = indent $ showstatus p ++ fitString (Just acctwidth) Nothing False True (showAccountName Nothing (ptype p) (paccount p)) where showstatus p = if pstatus p == Cleared then "* " else "" acctwidth = maximum $ map (textWidth . paccount) ps -- currently prices are considered part of the amount string when right-aligning amounts shownAmounts | elideamount = [""] | onelineamounts = [fitString (Just amtwidth) Nothing False False $ showMixedAmountOneLine $ pamount p] | otherwise = map (fitStringMulti (Just amtwidth) Nothing False False . showAmount ) . amounts $ pamount p where amtwidth = maximum $ 12 : map (strWidth . showMixedAmount . pamount) ps -- min. 12 for backwards compatibility (samelinecomment, newlinecomments) = case renderCommentLines (pcomment p) of [] -> ("",[]) c:cs -> (c,cs) -- used in balance assertion error showPostingLine p = indent $ if pstatus p == Cleared then "* " else "" ++ showAccountName Nothing (ptype p) (paccount p) ++ " " ++ showMixedAmountOneLine (pamount p) -- | Produce posting line with all comment lines associated with it showPostingLines :: Posting -> [String] showPostingLines p = postingAsLines False False ps p where ps | Just t <- ptransaction p = tpostings t | otherwise = [p] tests_postingAsLines = [ "postingAsLines" ~: do let p `gives` ls = assertEqual (show p) ls (postingAsLines False False [p] p) posting `gives` [] posting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1, hrs 2], pcomment="pcomment1\npcomment2\n tag3: val3 \n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")] } `gives` [ " * a $1.00 ; pcomment1", " ; pcomment2", " ; tag3: val3 ", " * a 2.00h ; pcomment1", " ; pcomment2", " ; tag3: val3 " ] ] tests_inference = [ "inferBalancingAmount" ~: do let p `gives` p' = assertEqual (show p) (Right p') $ inferTransaction p inferTransaction :: Transaction -> Either String Transaction inferTransaction = runIdentity . runExceptT . inferBalancingAmount (\_ _ -> return ()) nulltransaction `gives` nulltransaction nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` missingamt ]} `gives` nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` usd 5 ]} nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` missingamt ]} `gives` nulltransaction{ tpostings=[ "a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` usd 1 ]} ] indent :: String -> String indent = (" "++) -- | Show an account name, clipped to the given width if any, and -- appropriately bracketed/parenthesised for the given posting type. showAccountName :: Maybe Int -> PostingType -> AccountName -> String showAccountName w = fmt where fmt RegularPosting = take w' . T.unpack fmt VirtualPosting = parenthesise . reverse . take (w'-2) . 
reverse . T.unpack fmt BalancedVirtualPosting = bracket . reverse . take (w'-2) . reverse . T.unpack w' = fromMaybe 999999 w parenthesise :: String -> String parenthesise s = "("++s++")" bracket :: String -> String bracket s = "["++s++"]" hasRealPostings :: Transaction -> Bool hasRealPostings = not . null . realPostings realPostings :: Transaction -> [Posting] realPostings = filter isReal . tpostings assignmentPostings :: Transaction -> [Posting] assignmentPostings = filter isAssignment . tpostings virtualPostings :: Transaction -> [Posting] virtualPostings = filter isVirtual . tpostings balancedVirtualPostings :: Transaction -> [Posting] balancedVirtualPostings = filter isBalancedVirtual . tpostings transactionsPostings :: [Transaction] -> [Posting] transactionsPostings = concat . map tpostings -- | Get the sums of a transaction's real, virtual, and balanced virtual postings. transactionPostingBalances :: Transaction -> (MixedAmount,MixedAmount,MixedAmount) transactionPostingBalances t = (sumPostings $ realPostings t ,sumPostings $ virtualPostings t ,sumPostings $ balancedVirtualPostings t) -- | Is this transaction balanced ? A balanced transaction's real -- (non-virtual) postings sum to 0, and any balanced virtual postings -- also sum to 0. isTransactionBalanced :: Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> Bool isTransactionBalanced styles t = -- isReallyZeroMixedAmountCost rsum && isReallyZeroMixedAmountCost bvsum isZeroMixedAmount rsum' && isZeroMixedAmount bvsum' where (rsum, _, bvsum) = transactionPostingBalances t rsum' = canonicalise $ costOfMixedAmount rsum bvsum' = canonicalise $ costOfMixedAmount bvsum canonicalise = maybe id canonicaliseMixedAmount styles -- | Ensure this transaction is balanced, possibly inferring a missing -- amount or conversion price(s), or return an error message. -- Balancing is affected by commodity display precisions, so those can -- (optionally) be provided. -- -- this fails for example, if there are several missing amounts -- (possibly with balance assignments) balanceTransaction :: Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> Either String Transaction balanceTransaction stylemap = runIdentity . runExceptT . 
balanceTransactionUpdate (\_ _ -> return ()) stylemap -- | More general version of 'balanceTransaction' that takes an update -- function balanceTransactionUpdate :: MonadError String m => (AccountName -> MixedAmount -> m ()) -- ^ update function -> Maybe (Map.Map CommoditySymbol AmountStyle) -> Transaction -> m Transaction balanceTransactionUpdate update styles t = finalize =<< inferBalancingAmount update t where finalize t' = let t'' = inferBalancingPrices t' in if isTransactionBalanced styles t'' then return $ txnTieKnot t'' else throwError $ printerr $ nonzerobalanceerror t'' printerr s = intercalate "\n" [s, showTransactionUnelided t] nonzerobalanceerror :: Transaction -> String nonzerobalanceerror t = printf "could not balance this transaction (%s%s%s)" rmsg sep bvmsg where (rsum, _, bvsum) = transactionPostingBalances t rmsg | isReallyZeroMixedAmountCost rsum = "" | otherwise = "real postings are off by " ++ showMixedAmount (costOfMixedAmount rsum) bvmsg | isReallyZeroMixedAmountCost bvsum = "" | otherwise = "balanced virtual postings are off by " ++ showMixedAmount (costOfMixedAmount bvsum) sep = if not (null rmsg) && not (null bvmsg) then "; " else "" :: String -- | Infer up to one missing amount for this transactions's real postings, and -- likewise for its balanced virtual postings, if needed; or return an error -- message if we can't. -- -- We can infer a missing amount when there are multiple postings and exactly -- one of them is amountless. If the amounts had price(s) the inferred amount -- have the same price(s), and will be converted to the price commodity. inferBalancingAmount :: MonadError String m => (AccountName -> MixedAmount -> m ()) -- ^ update function -> Transaction -> m Transaction inferBalancingAmount update t@Transaction{tpostings=ps} | length amountlessrealps > 1 = throwError $ printerr "could not balance this transaction - can't have more than one real posting with no amount (remember to put 2 or more spaces before amounts)" | length amountlessbvps > 1 = throwError $ printerr "could not balance this transaction - can't have more than one balanced virtual posting with no amount (remember to put 2 or more spaces before amounts)" | otherwise = do postings <- mapM inferamount ps return t{tpostings=postings} where printerr s = intercalate "\n" [s, showTransactionUnelided t] (amountfulrealps, amountlessrealps) = partition hasAmount (realPostings t) realsum = sumStrict $ map pamount amountfulrealps (amountfulbvps, amountlessbvps) = partition hasAmount (balancedVirtualPostings t) bvsum = sumStrict $ map pamount amountfulbvps inferamount p@Posting{ptype=RegularPosting} | not (hasAmount p) = updateAmount p realsum inferamount p@Posting{ptype=BalancedVirtualPosting} | not (hasAmount p) = updateAmount p bvsum inferamount p = return p updateAmount p amt = update (paccount p) amt' >> return p { pamount=amt', porigin=Just $ originalPosting p } where amt' = normaliseMixedAmount $ costOfMixedAmount (-amt) -- | Infer prices for this transaction's posting amounts, if needed to make -- the postings balance, and if possible. This is done once for the real -- postings and again (separately) for the balanced virtual postings. When -- it's not possible, the transaction is left unchanged. -- -- The simplest example is a transaction with two postings, each in a -- different commodity, with no prices specified. In this case we'll add a -- price to the first posting such that it can be converted to the commodity -- of the second posting (with -B), and such that the postings balance. 
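-- For instance (mirroring the balanceTransaction tests further below), a
-- journal entry roughly like this (amounts illustrative):
--
-- @
-- 2011/01/01
--     a   $1.35
--     b   €-1
-- @
--
-- gets its first posting priced as $1.35 @@ €1, so that both postings cancel
-- out when converted to cost.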
-- -- In general, we can infer a conversion price when the sum of posting amounts -- contains exactly two different commodities and no explicit prices. Also -- all postings are expected to contain an explicit amount (no missing -- amounts) in a single commodity. Otherwise no price inferring is attempted. -- -- The transaction itself could contain more than two commodities, and/or -- prices, if they cancel out; what matters is that the sum of posting amounts -- contains exactly two commodities and zero prices. -- -- There can also be more than two postings in either of the commodities. -- -- We want to avoid excessive display of digits when the calculated price is -- an irrational number, while hopefully also ensuring the displayed numbers -- make sense if the user does a manual calculation. This is (mostly) achieved -- in two ways: -- -- - when there is only one posting in the "from" commodity, a total price -- (@@) is used, and all available decimal digits are shown -- -- - otherwise, a suitable averaged unit price (@) is applied to the relevant -- postings, with display precision equal to the summed display precisions -- of the two commodities being converted between, or 2, whichever is larger. -- -- (We don't always calculate a good-looking display precision for unit prices -- when the commodity display precisions are low, eg when a journal doesn't -- use any decimal places. The minimum of 2 helps make the prices shown by the -- print command a bit less surprising in this case. Could do better.) -- inferBalancingPrices :: Transaction -> Transaction inferBalancingPrices t@Transaction{tpostings=ps} = t{tpostings=ps'} where ps' = map (priceInferrerFor t BalancedVirtualPosting) $ map (priceInferrerFor t RegularPosting) $ ps -- | Generate a posting update function which assigns a suitable balancing -- price to the posting, if and as appropriate for the given transaction and -- posting type (real or balanced virtual). priceInferrerFor :: Transaction -> PostingType -> (Posting -> Posting) priceInferrerFor t pt = inferprice where postings = filter ((==pt).ptype) $ tpostings t pmixedamounts = map pamount postings pamounts = concatMap amounts pmixedamounts pcommodities = map acommodity pamounts sumamounts = amounts $ sumStrict pmixedamounts -- sum normalises to one amount per commodity & price sumcommodities = map acommodity sumamounts sumprices = filter (/=NoPrice) $ map aprice sumamounts caninferprices = length sumcommodities == 2 && null sumprices inferprice p@Posting{pamount=Mixed [a]} | caninferprices && ptype p == pt && acommodity a == fromcommodity = p{pamount=Mixed [a{aprice=conversionprice}], porigin=Just $ originalPosting p} where fromcommodity = head $ filter (`elem` sumcommodities) pcommodities -- these heads are ugly but should be safe conversionprice | fromcount==1 = TotalPrice $ abs toamount `withPrecision` maxprecision | otherwise = UnitPrice $ abs unitprice `withPrecision` unitprecision where fromcount = length $ filter ((==fromcommodity).acommodity) pamounts fromamount = head $ filter ((==fromcommodity).acommodity) sumamounts tocommodity = head $ filter (/=fromcommodity) sumcommodities toamount = head $ filter ((==tocommodity).acommodity) sumamounts unitprice = toamount `divideAmount` (aquantity fromamount) unitprecision = max 2 ((asprecision $ astyle $ toamount) + (asprecision $ astyle $ fromamount)) inferprice p = p -- Get a transaction's secondary date, defaulting to the primary date. 
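--
-- A rough sketch (dates are illustrative):
--
-- @
-- transactionDate2 nulltransaction{tdate = fromGregorian 2017 1 1}
--   -- 2017-01-01
-- transactionDate2 nulltransaction{tdate = fromGregorian 2017 1 1, tdate2 = Just (fromGregorian 2017 1 2)}
--   -- 2017-01-02
-- @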
transactionDate2 :: Transaction -> Day transactionDate2 t = fromMaybe (tdate t) $ tdate2 t -- | Ensure a transaction's postings refer back to it, so that eg -- relatedPostings works right. txnTieKnot :: Transaction -> Transaction txnTieKnot t@Transaction{tpostings=ps} = t' where t' = t{tpostings=map (postingSetTransaction t') ps} -- | Ensure a transaction's postings do not refer back to it, so that eg -- recursiveSize and GHCI's :sprint work right. txnUntieKnot :: Transaction -> Transaction txnUntieKnot t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Set a posting's parent transaction. postingSetTransaction :: Transaction -> Posting -> Posting postingSetTransaction t p = p{ptransaction=Just t} tests_Hledger_Data_Transaction = TestList $ concat [ tests_postingAsLines, tests_showTransactionUnelided, tests_inference, [ "showTransaction" ~: do assertEqual "show a balanced transaction, eliding last amount" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking" ,"" ]) (let t = Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18], ptransaction=Just t} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.18)], ptransaction=Just t} ] "" in showTransaction t) ,"showTransaction" ~: do assertEqual "show a balanced transaction, no eliding" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.18" ,"" ]) (let t = Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18], ptransaction=Just t} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.18)], ptransaction=Just t} ] "" in showTransactionUnelided t) -- document some cases that arise in debug/testing: ,"showTransaction" ~: do assertEqual "show an unbalanced transaction, should not elide" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.19" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18]} ,posting{paccount="assets:checking", pamount=Mixed [usd (-47.19)]} ] "")) ,"showTransaction" ~: do assertEqual "show an unbalanced transaction with one posting, should not elide" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=Mixed [usd 47.18]} ] "")) ,"showTransaction" ~: do assertEqual "show a transaction with one posting and a missing amount" (unlines ["2007/01/28 coopportunity" ," expenses:food:groceries" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "coopportunity" "" [] [posting{paccount="expenses:food:groceries", pamount=missingmixedamt} ] "")) ,"showTransaction" ~: do assertEqual "show a transaction with a priced commodityless amount" (unlines ["2010/01/01 x" ," a 1 @ $2" ," b" ,"" ]) (showTransaction (txnTieKnot $ Transaction 0 nullsourcepos (parsedate "2010/01/01") Nothing Uncleared "" "x" "" [] [posting{paccount="a", pamount=Mixed [num 1 `at` (usd 2 `withPrecision` 0)]} ,posting{paccount="b", pamount= missingmixedamt} ] "")) 
,"balanceTransaction" ~: do assertBool "detect unbalanced entry, sign error" (isLeft $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "test" "" [] [posting{paccount="a", pamount=Mixed [usd 1]} ,posting{paccount="b", pamount=Mixed [usd 1]} ] "")) assertBool "detect unbalanced entry, multiple missing amounts" (isLeft $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "test" "" [] [posting{paccount="a", pamount=missingmixedamt} ,posting{paccount="b", pamount=missingmixedamt} ] "")) let e = balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2007/01/28") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1]} ,posting{paccount="b", pamount=missingmixedamt} ] "") assertBool "balanceTransaction allows one missing amount" (isRight e) assertEqual "balancing amount is inferred" (Mixed [usd (-1)]) (case e of Right e' -> (pamount $ last $ tpostings e') Left _ -> error' "should not happen") let e = balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1.35]} ,posting{paccount="b", pamount=Mixed [eur (-1)]} ] "") assertBool "balanceTransaction can infer conversion price" (isRight e) assertEqual "balancing conversion price is inferred" (Mixed [usd 1.35 @@ (eur 1 `withPrecision` maxprecision)]) (case e of Right e' -> (pamount $ head $ tpostings e') Left _ -> error' "should not happen") assertBool "balanceTransaction balances based on cost if there are unit prices" (isRight $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1 `at` eur 2]} ,posting{paccount="a", pamount=Mixed [usd (-2) `at` eur 1]} ] "")) assertBool "balanceTransaction balances based on cost if there are total prices" (isRight $ balanceTransaction Nothing (Transaction 0 nullsourcepos (parsedate "2011/01/01") Nothing Uncleared "" "" "" [] [posting{paccount="a", pamount=Mixed [usd 1 @@ eur 1]} ,posting{paccount="a", pamount=Mixed [usd (-2) @@ eur 1]} ] "")) ,"isTransactionBalanced" ~: do let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ] "" assertBool "detect balanced" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.01)], ptransaction=Just t} ] "" assertBool "detect unbalanced" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ] "" assertBool "detect unbalanced, one posting" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 0], ptransaction=Just t} ] "" assertBool "one zero posting is considered balanced for now" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], 
ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=VirtualPosting, ptransaction=Just t} ] "" assertBool "virtual postings don't need to balance" (isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=BalancedVirtualPosting, ptransaction=Just t} ] "" assertBool "balanced virtual postings need to balance among themselves" (not $ isTransactionBalanced Nothing t) let t = Transaction 0 nullsourcepos (parsedate "2009/01/01") Nothing Uncleared "" "a" "" [] [posting{paccount="b", pamount=Mixed [usd 1.00], ptransaction=Just t} ,posting{paccount="c", pamount=Mixed [usd (-1.00)], ptransaction=Just t} ,posting{paccount="d", pamount=Mixed [usd 100], ptype=BalancedVirtualPosting, ptransaction=Just t} ,posting{paccount="3", pamount=Mixed [usd (-100)], ptype=BalancedVirtualPosting, ptransaction=Just t} ] "" assertBool "balanced virtual postings need to balance among themselves (2)" (isTransactionBalanced Nothing t) ]] hledger-lib-1.2/Hledger/Data/Types.hs0000644000000000000000000003607113066173044015616 0ustar0000000000000000{-# LANGUAGE CPP, DeriveDataTypeable, StandaloneDeriving, DeriveGeneric, TypeSynonymInstances, FlexibleInstances #-} {-| Most data types are defined here to avoid import cycles. Here is an overview of the hledger data model: > Journal -- a journal is read from one or more data files. It contains.. > [Transaction] -- journal transactions (aka entries), which have date, cleared status, code, description and.. > [Posting] -- multiple account postings, which have account name and amount > [MarketPrice] -- historical market prices for commodities > > Ledger -- a ledger is derived from a journal, by applying a filter specification and doing some further processing. It contains.. > Journal -- a filtered copy of the original journal, containing only the transactions and postings we are interested in > [Account] -- all accounts, in tree order beginning with a "root" account", with their balances and sub/parent accounts For more detailed documentation on each type, see the corresponding modules. -} module Hledger.Data.Types where import GHC.Generics (Generic) import Control.DeepSeq (NFData) import Control.Monad.Except (ExceptT) import Data.Data import Data.Decimal import Data.Default import Text.Blaze (ToMarkup(..)) import qualified Data.Map as M import Data.Text (Text) -- import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import System.Time (ClockTime(..)) import Hledger.Utils.Regex type SmartDate = (String,String,String) data WhichDate = PrimaryDate | SecondaryDate deriving (Eq,Show) data DateSpan = DateSpan (Maybe Day) (Maybe Day) deriving (Eq,Ord,Data,Generic,Typeable) instance NFData DateSpan -- synonyms for various date-related scalars type Year = Integer type Month = Int -- 1-12 type Quarter = Int -- 1-4 type YearWeek = Int -- 1-52 type MonthWeek = Int -- 1-5 type YearDay = Int -- 1-366 type MonthDay = Int -- 1-31 type WeekDay = Int -- 1-7 -- Typical report periods (spans of time), both finite and open-ended. -- A richer abstraction than DateSpan. 
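-- Some illustrative values (the dates are made up):
-- @MonthPeriod 2017 1@ is January 2017,
-- @WeekPeriod (fromGregorian 2016 7 25)@ is the week starting Monday 2016/07/25,
-- and @PeriodFrom (fromGregorian 2017 1 1)@ is the open-ended period from 2017/01/01 onwards.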
data Period = DayPeriod Day | WeekPeriod Day | MonthPeriod Year Month | QuarterPeriod Year Quarter | YearPeriod Year | PeriodBetween Day Day | PeriodFrom Day | PeriodTo Day | PeriodAll deriving (Eq,Ord,Show,Data,Generic,Typeable) instance Default Period where def = PeriodAll ---- Typical report period/subperiod durations, from a day to a year. --data Duration = -- DayLong -- WeekLong -- MonthLong -- QuarterLong -- YearLong -- deriving (Eq,Ord,Show,Data,Generic,Typeable) -- Ways in which a period can be divided into subperiods. data Interval = NoInterval | Days Int | Weeks Int | Months Int | Quarters Int | Years Int | DayOfMonth Int | DayOfWeek Int -- WeekOfYear Int -- MonthOfYear Int -- QuarterOfYear Int deriving (Eq,Show,Ord,Data,Generic,Typeable) instance Default Interval where def = NoInterval instance NFData Interval type AccountName = Text data AccountAlias = BasicAlias AccountName AccountName | RegexAlias Regexp Replacement deriving (Eq, Read, Show, Ord, Data, Generic, Typeable) instance NFData AccountAlias data Side = L | R deriving (Eq,Show,Read,Ord,Typeable,Data,Generic) instance NFData Side -- | The basic numeric type used in amounts. type Quantity = Decimal deriving instance Data (Quantity) -- The following is for hledger-web, and requires blaze-markup. -- Doing it here avoids needing a matching flag on the hledger-web package. instance ToMarkup (Quantity) where toMarkup = toMarkup . show -- | An amount's price (none, per unit, or total) in another commodity. -- Note the price should be a positive number, although this is not enforced. data Price = NoPrice | UnitPrice Amount | TotalPrice Amount deriving (Eq,Ord,Typeable,Data,Generic) instance NFData Price -- | Display style for an amount. data AmountStyle = AmountStyle { ascommodityside :: Side, -- ^ does the symbol appear on the left or the right ? ascommodityspaced :: Bool, -- ^ space between symbol and quantity ? asprecision :: !Int, -- ^ number of digits displayed after the decimal point asdecimalpoint :: Maybe Char, -- ^ character used as decimal point: period or comma. Nothing means "unspecified, use default" asdigitgroups :: Maybe DigitGroupStyle -- ^ style for displaying digit groups, if any } deriving (Eq,Ord,Read,Show,Typeable,Data,Generic) instance NFData AmountStyle -- | A style for displaying digit groups in the integer part of a -- floating point number. It consists of the character used to -- separate groups (comma or period, whichever is not used as decimal -- point), and the size of each group, starting with the one nearest -- the decimal point. The last group size is assumed to repeat. Eg, -- comma between thousands is DigitGroups ',' [3]. 
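-- Since the last group size repeats, Indian-style lakh/crore grouping
-- (eg 10,00,000) could be written as DigitGroups ',' [3,2] (an illustrative
-- value, not one used in this library's tests).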
data DigitGroupStyle = DigitGroups Char [Int] deriving (Eq,Ord,Read,Show,Typeable,Data,Generic) instance NFData DigitGroupStyle type CommoditySymbol = Text data Commodity = Commodity { csymbol :: CommoditySymbol, cformat :: Maybe AmountStyle } deriving (Show,Eq,Data,Generic) --,Ord,Typeable,Data,Generic) instance NFData Commodity data Amount = Amount { acommodity :: CommoditySymbol, aquantity :: Quantity, aprice :: Price, -- ^ the (fixed) price for this amount, if any astyle :: AmountStyle } deriving (Eq,Ord,Typeable,Data,Generic) instance NFData Amount newtype MixedAmount = Mixed [Amount] deriving (Eq,Ord,Typeable,Data,Generic) instance NFData MixedAmount data PostingType = RegularPosting | VirtualPosting | BalancedVirtualPosting deriving (Eq,Show,Typeable,Data,Generic) instance NFData PostingType type TagName = Text type TagValue = Text type Tag = (TagName, TagValue) -- ^ A tag name and (possibly empty) value. data ClearedStatus = Uncleared | Pending | Cleared deriving (Eq,Ord,Typeable,Data,Generic) instance NFData ClearedStatus instance Show ClearedStatus where -- custom show.. bad idea.. don't do it.. show Uncleared = "" show Pending = "!" show Cleared = "*" data Posting = Posting { pdate :: Maybe Day, -- ^ this posting's date, if different from the transaction's pdate2 :: Maybe Day, -- ^ this posting's secondary date, if different from the transaction's pstatus :: ClearedStatus, paccount :: AccountName, pamount :: MixedAmount, pcomment :: Text, -- ^ this posting's comment lines, as a single non-indented multi-line string ptype :: PostingType, ptags :: [Tag], -- ^ tag names and values, extracted from the comment pbalanceassertion :: Maybe Amount, -- ^ optional: the expected balance in this commodity in the account after this posting ptransaction :: Maybe Transaction, -- ^ this posting's parent transaction (co-recursive types). -- Tying this knot gets tedious, Maybe makes it easier/optional. porigin :: Maybe Posting -- ^ original posting if this one is result of any transformations (one level only) } deriving (Typeable,Data,Generic) instance NFData Posting -- The equality test for postings ignores the parent transaction's -- identity, to avoid infinite loops. instance Eq Posting where (==) (Posting a1 b1 c1 d1 e1 f1 g1 h1 i1 _ _) (Posting a2 b2 c2 d2 e2 f2 g2 h2 i2 _ _) = a1==a2 && b1==b2 && c1==c2 && d1==d2 && e1==e2 && f1==f2 && g1==g2 && h1==h2 && i1==i2 -- TODO: needs renaming, or removal if no longer needed. See also TextPosition in Hledger.UI.Editor -- | The position of parse errors (eg), like parsec's SourcePos but generic. data GenericSourcePos = GenericSourcePos FilePath Int Int -- ^ name, 1-based line number and 1-based column number. | JournalSourcePos FilePath (Int, Int) -- ^ file name, inclusive range of 1-based line numbers (first, last). 
deriving (Eq, Read, Show, Ord, Data, Generic, Typeable) instance NFData GenericSourcePos data Transaction = Transaction { tindex :: Integer, -- ^ this transaction's 1-based position in the input stream, or 0 when not available tsourcepos :: GenericSourcePos, tdate :: Day, tdate2 :: Maybe Day, tstatus :: ClearedStatus, tcode :: Text, tdescription :: Text, tcomment :: Text, -- ^ this transaction's comment lines, as a single non-indented multi-line string ttags :: [Tag], -- ^ tag names and values, extracted from the comment tpostings :: [Posting], -- ^ this transaction's postings tpreceding_comment_lines :: Text -- ^ any comment lines immediately preceding this transaction } deriving (Eq,Typeable,Data,Generic) instance NFData Transaction data ModifierTransaction = ModifierTransaction { mtvalueexpr :: Text, mtpostings :: [Posting] } deriving (Eq,Typeable,Data,Generic) instance NFData ModifierTransaction data PeriodicTransaction = PeriodicTransaction { ptperiodicexpr :: Text, ptpostings :: [Posting] } deriving (Eq,Typeable,Data,Generic) instance NFData PeriodicTransaction data TimeclockCode = SetBalance | SetRequiredHours | In | Out | FinalOut deriving (Eq,Ord,Typeable,Data,Generic) instance NFData TimeclockCode data TimeclockEntry = TimeclockEntry { tlsourcepos :: GenericSourcePos, tlcode :: TimeclockCode, tldatetime :: LocalTime, tlaccount :: AccountName, tldescription :: Text } deriving (Eq,Ord,Typeable,Data,Generic) instance NFData TimeclockEntry data MarketPrice = MarketPrice { mpdate :: Day, mpcommodity :: CommoditySymbol, mpamount :: Amount } deriving (Eq,Ord,Typeable,Data,Generic) -- & Show (in Amount.hs) instance NFData MarketPrice -- | A Journal, containing transactions and various other things. -- The basic data model for hledger. -- -- This is used during parsing (as the type alias ParsedJournal), and -- then finalised/validated for use as a Journal. Some extra -- parsing-related fields are included for convenience, at least for -- now. In a ParsedJournal these are updated as parsing proceeds, in a -- Journal they represent the final state at end of parsing (used eg -- by the add command). -- data Journal = Journal { -- parsing-related data jparsedefaultyear :: Maybe Year -- ^ the current default year, specified by the most recent Y directive (or current date) ,jparsedefaultcommodity :: Maybe (CommoditySymbol,AmountStyle) -- ^ the current default commodity and its format, specified by the most recent D directive ,jparseparentaccounts :: [AccountName] -- ^ the current stack of parent account names, specified by apply account directives ,jparsealiases :: [AccountAlias] -- ^ the current account name aliases in effect, specified by alias directives (& options ?) 
-- ,jparsetransactioncount :: Integer -- ^ the current count of transactions parsed so far (only journal format txns, currently) ,jparsetimeclockentries :: [TimeclockEntry] -- ^ timeclock sessions which have not been clocked out -- principal data ,jaccounts :: [AccountName] -- ^ accounts that have been declared by account directives ,jcommodities :: M.Map CommoditySymbol Commodity -- ^ commodities and formats declared by commodity directives ,jinferredcommodities :: M.Map CommoditySymbol AmountStyle -- ^ commodities and formats inferred from journal amounts ,jmarketprices :: [MarketPrice] ,jmodifiertxns :: [ModifierTransaction] ,jperiodictxns :: [PeriodicTransaction] ,jtxns :: [Transaction] ,jfinalcommentlines :: Text -- ^ any final trailing comments in the (main) journal file ,jfiles :: [(FilePath, Text)] -- ^ the file path and raw text of the main and -- any included journal files. The main file is first, -- followed by any included files in the order encountered. ,jlastreadtime :: ClockTime -- ^ when this journal was last read from its file(s) } deriving (Eq, Typeable, Data, Generic) deriving instance Data (ClockTime) deriving instance Typeable (ClockTime) deriving instance Generic (ClockTime) instance NFData ClockTime instance NFData Journal -- | A journal in the process of being parsed, not yet finalised. -- The data is partial, and list fields are in reverse order. type ParsedJournal = Journal -- | The id of a data format understood by hledger, eg @journal@ or @csv@. -- The --output-format option selects one of these for output. type StorageFormat = String -- | A hledger journal reader is a triple of storage format name, a -- detector of that format, and a parser from that format to Journal. data Reader = Reader { -- The canonical name of the format handled by this reader rFormat :: StorageFormat -- The file extensions recognised as containing this format ,rExtensions :: [String] -- A text parser for this format, accepting an optional rules file, -- assertion-checking flag, and file path for error messages, -- producing an exception-raising IO action that returns a journal -- or error message. ,rParser :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal -- Experimental readers are never tried automatically. ,rExperimental :: Bool } instance Show Reader where show r = rFormat r ++ " reader" -- | An account, with name, balances and links to parent/subaccounts -- which let you walk up or down the account tree. data Account = Account { aname :: AccountName, -- ^ this account's full name aebalance :: MixedAmount, -- ^ this account's balance, excluding subaccounts asubs :: [Account], -- ^ sub-accounts anumpostings :: Int, -- ^ number of postings to this account -- derived from the above : aibalance :: MixedAmount, -- ^ this account's balance, including subaccounts aparent :: Maybe Account, -- ^ parent account aboring :: Bool -- ^ used in the accounts report to label elidable parents } deriving (Typeable, Data, Generic) -- | A Ledger has the journal it derives from, and the accounts -- derived from that. Accounts are accessible both list-wise and -- tree-wise, since each one knows its parent and subs; the first -- account is the root of the tree and always exists. data Ledger = Ledger { ljournal :: Journal, laccounts :: [Account] } hledger-lib-1.2/Hledger/Query.hs0000644000000000000000000010553513042200120014724 0ustar0000000000000000{-| A general query system for matching things (accounts, postings, transactions..) 
by various criteria, and a parser for query expressions. -} {-# LANGUAGE DeriveDataTypeable, OverloadedStrings, ViewPatterns #-} module Hledger.Query ( -- * Query and QueryOpt Query(..), QueryOpt(..), -- * parsing parseQuery, simplifyQuery, filterQuery, -- * accessors queryIsNull, queryIsAcct, queryIsDepth, queryIsDate, queryIsDate2, queryIsDateOrDate2, queryIsStartDateOnly, queryIsSym, queryIsReal, queryIsStatus, queryIsEmpty, queryStartDate, queryEndDate, queryDateSpan, queryDateSpan', queryDepth, inAccount, inAccountQuery, -- * matching matchesTransaction, matchesPosting, matchesAccount, matchesMixedAmount, matchesAmount, words'', -- * tests tests_Hledger_Query ) where import Data.Data import Data.Either import Data.List import Data.Maybe import Data.Monoid ((<>)) -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe (readDef, headDef) import Test.HUnit import Text.Megaparsec import Text.Megaparsec.Text import Hledger.Utils hiding (words') import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount (amount, nullamt, usd) import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Transaction -- | A query is a composition of search criteria, which can be used to -- match postings, transactions, accounts and more. data Query = Any -- ^ always match | None -- ^ never match | Not Query -- ^ negate this match | Or [Query] -- ^ match if any of these match | And [Query] -- ^ match if all of these match | Code Regexp -- ^ match if code matches this regexp | Desc Regexp -- ^ match if description matches this regexp | Acct Regexp -- ^ match postings whose account matches this regexp | Date DateSpan -- ^ match if primary date in this date span | Date2 DateSpan -- ^ match if secondary date in this date span | Status ClearedStatus -- ^ match txns/postings with this cleared status (Status Uncleared matches all states except cleared) | Real Bool -- ^ match if "realness" (involves a real non-virtual account ?) has this value | Amt OrdPlus Quantity -- ^ match if the amount's numeric quantity is less than/greater than/equal to/unsignedly equal to some value | Sym Regexp -- ^ match if the entire commodity symbol is matched by this regexp | Empty Bool -- ^ if true, show zero-amount postings/accounts which are usually not shown -- more of a query option than a query criteria ? | Depth Int -- ^ match if account depth is less than or equal to this value. 
-- Depth is sometimes used like a query (for filtering report data) -- and sometimes like a query option (for controlling display) | Tag Regexp (Maybe Regexp) -- ^ match if a tag's name, and optionally its value, is matched by these respective regexps -- matching the regexp if provided, exists deriving (Eq,Data,Typeable) -- custom Show implementation to show strings more accurately, eg for debugging regexps instance Show Query where show Any = "Any" show None = "None" show (Not q) = "Not (" ++ show q ++ ")" show (Or qs) = "Or (" ++ show qs ++ ")" show (And qs) = "And (" ++ show qs ++ ")" show (Code r) = "Code " ++ show r show (Desc r) = "Desc " ++ show r show (Acct r) = "Acct " ++ show r show (Date ds) = "Date (" ++ show ds ++ ")" show (Date2 ds) = "Date2 (" ++ show ds ++ ")" show (Status b) = "Status " ++ show b show (Real b) = "Real " ++ show b show (Amt ord qty) = "Amt " ++ show ord ++ " " ++ show qty show (Sym r) = "Sym " ++ show r show (Empty b) = "Empty " ++ show b show (Depth n) = "Depth " ++ show n show (Tag s ms) = "Tag " ++ show s ++ " (" ++ show ms ++ ")" -- | A query option changes a query's/report's behaviour and output in some way. data QueryOpt = QueryOptInAcctOnly AccountName -- ^ show an account register focussed on this account | QueryOptInAcct AccountName -- ^ as above but include sub-accounts in the account register -- | QueryOptCostBasis -- ^ show amounts converted to cost where possible -- | QueryOptDate2 -- ^ show secondary dates instead of primary dates deriving (Show, Eq, Data, Typeable) -- parsing -- -- | A query restricting the account(s) to be shown in the sidebar, if any. -- -- Just looks at the first query option. -- showAccountMatcher :: [QueryOpt] -> Maybe Query -- showAccountMatcher (QueryOptInAcctSubsOnly a:_) = Just $ Acct True $ accountNameToAccountRegex a -- showAccountMatcher _ = Nothing -- | Convert a query expression containing zero or more space-separated -- terms to a query and zero or more query options. A query term is either: -- -- 1. a search pattern, which matches on one or more fields, eg: -- -- acct:REGEXP - match the account name with a regular expression -- desc:REGEXP - match the transaction description -- date:PERIODEXP - match the date with a period expression -- -- The prefix indicates the field to match, or if there is no prefix -- account name is assumed. -- -- 2. a query option, which modifies the reporting behaviour in some -- way. There is currently one of these, which may appear only once: -- -- inacct:FULLACCTNAME -- -- The usual shell quoting rules are assumed. When a pattern contains -- whitespace, it (or the whole term including prefix) should be enclosed -- in single or double quotes. -- -- Period expressions may contain relative dates, so a reference date is -- required to fully parse these. -- -- Multiple terms are combined as follows: -- 1. multiple account patterns are OR'd together -- 2. multiple description patterns are OR'd together -- 3. 
then all terms are AND'd together parseQuery :: Day -> T.Text -> (Query,[QueryOpt]) parseQuery d s = (q, opts) where terms = words'' prefixes s (pats, opts) = partitionEithers $ map (parseQueryTerm d) terms (descpats, pats') = partition queryIsDesc pats (acctpats, otherpats) = partition queryIsAcct pats' q = simplifyQuery $ And $ [Or acctpats, Or descpats] ++ otherpats tests_parseQuery = [ "parseQuery" ~: do let d = nulldate -- parsedate "2011/1/1" parseQuery d "acct:'expenses:autres d\233penses' desc:b" `is` (And [Acct "expenses:autres d\233penses", Desc "b"], []) parseQuery d "inacct:a desc:\"b b\"" `is` (Desc "b b", [QueryOptInAcct "a"]) parseQuery d "inacct:a inacct:b" `is` (Any, [QueryOptInAcct "a", QueryOptInAcct "b"]) parseQuery d "desc:'x x'" `is` (Desc "x x", []) parseQuery d "'a a' 'b" `is` (Or [Acct "a a",Acct "'b"], []) parseQuery d "\"" `is` (Acct "\"", []) ] -- XXX -- | Quote-and-prefix-aware version of words - don't split on spaces which -- are inside quotes, including quotes which may have one of the specified -- prefixes in front, and maybe an additional not: prefix in front of that. words'' :: [T.Text] -> T.Text -> [T.Text] words'' prefixes = fromparse . parsewith maybeprefixedquotedphrases -- XXX where maybeprefixedquotedphrases :: Parser [T.Text] maybeprefixedquotedphrases = choice' [prefixedQuotedPattern, singleQuotedPattern, doubleQuotedPattern, pattern] `sepBy` some spacenonewline prefixedQuotedPattern :: Parser T.Text prefixedQuotedPattern = do not' <- fromMaybe "" `fmap` (optional $ string "not:") let allowednexts | null not' = prefixes | otherwise = prefixes ++ [""] next <- fmap T.pack $ choice' $ map (string . T.unpack) allowednexts let prefix :: T.Text prefix = T.pack not' <> next p <- singleQuotedPattern <|> doubleQuotedPattern return $ prefix <> stripquotes p singleQuotedPattern :: Parser T.Text singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf ("'" :: [Char])) >>= return . stripquotes . T.pack doubleQuotedPattern :: Parser T.Text doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf ("\"" :: [Char])) >>= return . stripquotes . T.pack pattern :: Parser T.Text pattern = fmap T.pack $ many (noneOf (" \n\r" :: [Char])) tests_words'' = [ "words''" ~: do assertEqual "1" ["a","b"] (words'' [] "a b") assertEqual "2" ["a b"] (words'' [] "'a b'") assertEqual "3" ["not:a","b"] (words'' [] "not:a b") assertEqual "4" ["not:a b"] (words'' [] "not:'a b'") assertEqual "5" ["not:a b"] (words'' [] "'not:a b'") assertEqual "6" ["not:desc:a b"] (words'' ["desc:"] "not:desc:'a b'") let s `gives` r = assertEqual "" r (words'' prefixes s) "\"acct:expenses:autres d\233penses\"" `gives` ["acct:expenses:autres d\233penses"] "\"" `gives` ["\""] ] -- XXX -- keep synced with patterns below, excluding "not" prefixes :: [T.Text] prefixes = map (<>":") [ "inacctonly" ,"inacct" ,"amt" ,"code" ,"desc" ,"acct" ,"date" ,"date2" ,"status" ,"cur" ,"real" ,"empty" ,"depth" ,"tag" ] defaultprefix :: T.Text defaultprefix = "acct" -- -- | Parse the query string as a boolean tree of match patterns. -- parseQueryTerm :: String -> Query -- parseQueryTerm s = either (const (Any)) id $ runParser query () "" $ lexmatcher s -- lexmatcher :: String -> [String] -- lexmatcher s = words' s -- query :: GenParser String () Query -- query = undefined -- | Parse a single query term as either a query or a query option, -- or raise an error if it has invalid syntax. 
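-- Some representative mappings (mirroring the tests below; the Date span
-- is shown informally):
--
--   "a"             -> Left  (Acct "a")          -- a bare term defaults to acct:
--   "not:desc:a b"  -> Left  (Not (Desc "a b"))
--   "date:2008"     -> Left  (Date <2008/01/01..2009/01/01>)
--   "inacct:a"      -> Right (QueryOptInAcct "a")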
parseQueryTerm :: Day -> T.Text -> Either Query QueryOpt parseQueryTerm _ (T.stripPrefix "inacctonly:" -> Just s) = Right $ QueryOptInAcctOnly s parseQueryTerm _ (T.stripPrefix "inacct:" -> Just s) = Right $ QueryOptInAcct s parseQueryTerm d (T.stripPrefix "not:" -> Just s) = case parseQueryTerm d s of Left m -> Left $ Not m Right _ -> Left Any -- not:somequeryoption will be ignored parseQueryTerm _ (T.stripPrefix "code:" -> Just s) = Left $ Code $ T.unpack s parseQueryTerm _ (T.stripPrefix "desc:" -> Just s) = Left $ Desc $ T.unpack s parseQueryTerm _ (T.stripPrefix "acct:" -> Just s) = Left $ Acct $ T.unpack s parseQueryTerm d (T.stripPrefix "date2:" -> Just s) = case parsePeriodExpr d s of Left e -> error' $ "\"date2:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,span) -> Left $ Date2 span parseQueryTerm d (T.stripPrefix "date:" -> Just s) = case parsePeriodExpr d s of Left e -> error' $ "\"date:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,span) -> Left $ Date span parseQueryTerm _ (T.stripPrefix "status:" -> Just s) = case parseStatus s of Left e -> error' $ "\"status:"++T.unpack s++"\" gave a parse error: " ++ e Right st -> Left $ Status st parseQueryTerm _ (T.stripPrefix "real:" -> Just s) = Left $ Real $ parseBool s || T.null s parseQueryTerm _ (T.stripPrefix "amt:" -> Just s) = Left $ Amt ord q where (ord, q) = parseAmountQueryTerm s parseQueryTerm _ (T.stripPrefix "empty:" -> Just s) = Left $ Empty $ parseBool s parseQueryTerm _ (T.stripPrefix "depth:" -> Just s) | n >= 0 = Left $ Depth n | otherwise = error' "depth: should have a positive number" where n = readDef 0 (T.unpack s) parseQueryTerm _ (T.stripPrefix "cur:" -> Just s) = Left $ Sym (T.unpack s) -- support cur: as an alias parseQueryTerm _ (T.stripPrefix "tag:" -> Just s) = Left $ Tag n v where (n,v) = parseTag s parseQueryTerm _ "" = Left $ Any parseQueryTerm d s = parseQueryTerm d $ defaultprefix<>":"<>s tests_parseQueryTerm = [ "parseQueryTerm" ~: do let s `gives` r = parseQueryTerm nulldate s `is` r "a" `gives` (Left $ Acct "a") "acct:expenses:autres d\233penses" `gives` (Left $ Acct "expenses:autres d\233penses") "not:desc:a b" `gives` (Left $ Not $ Desc "a b") "status:1" `gives` (Left $ Status Cleared) "status:*" `gives` (Left $ Status Cleared) "status:!" 
`gives` (Left $ Status Pending) "status:0" `gives` (Left $ Status Uncleared) "status:" `gives` (Left $ Status Uncleared) "real:1" `gives` (Left $ Real True) "date:2008" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2008/01/01") (Just $ parsedate "2009/01/01")) "date:from 2012/5/17" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2012/05/17") Nothing) "inacct:a" `gives` (Right $ QueryOptInAcct "a") "tag:a" `gives` (Left $ Tag "a" Nothing) "tag:a=some value" `gives` (Left $ Tag "a" (Just "some value")) -- "amt:<0" `gives` (Left $ Amt LT 0) -- "amt:=.23" `gives` (Left $ Amt EQ 0.23) -- "amt:>10000.10" `gives` (Left $ Amt GT 10000.1) ] data OrdPlus = Lt | LtEq | Gt | GtEq | Eq | AbsLt | AbsLtEq | AbsGt | AbsGtEq | AbsEq deriving (Show,Eq,Data,Typeable) -- can fail parseAmountQueryTerm :: T.Text -> (OrdPlus, Quantity) parseAmountQueryTerm s' = case s' of -- feel free to do this a smarter way "" -> err (T.stripPrefix "<+" -> Just s) -> (Lt, readDef err (T.unpack s)) (T.stripPrefix "<=+" -> Just s) -> (LtEq, readDef err (T.unpack s)) (T.stripPrefix ">+" -> Just s) -> (Gt, readDef err (T.unpack s)) (T.stripPrefix ">=+" -> Just s) -> (GtEq, readDef err (T.unpack s)) (T.stripPrefix "=+" -> Just s) -> (Eq, readDef err (T.unpack s)) (T.stripPrefix "+" -> Just s) -> (Eq, readDef err (T.unpack s)) (T.stripPrefix "<-" -> Just s) -> (Lt, negate $ readDef err (T.unpack s)) (T.stripPrefix "<=-" -> Just s) -> (LtEq, negate $ readDef err (T.unpack s)) (T.stripPrefix ">-" -> Just s) -> (Gt, negate $ readDef err (T.unpack s)) (T.stripPrefix ">=-" -> Just s) -> (GtEq, negate $ readDef err (T.unpack s)) (T.stripPrefix "=-" -> Just s) -> (Eq, negate $ readDef err (T.unpack s)) (T.stripPrefix "-" -> Just s) -> (Eq, negate $ readDef err (T.unpack s)) (T.stripPrefix "<=" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (LtEq, 0) _ -> (AbsLtEq, n) (T.stripPrefix "<" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (Lt, 0) _ -> (AbsLt, n) (T.stripPrefix ">=" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (GtEq, 0) _ -> (AbsGtEq, n) (T.stripPrefix ">" -> Just s) -> let n = readDef err (T.unpack s) in case n of 0 -> (Gt, 0) _ -> (AbsGt, n) (T.stripPrefix "=" -> Just s) -> (AbsEq, readDef err (T.unpack s)) s -> (AbsEq, readDef err (T.unpack s)) where err = error' $ "could not parse as '=', '<', or '>' (optional) followed by a (optionally signed) numeric quantity: " ++ T.unpack s' tests_parseAmountQueryTerm = [ "parseAmountQueryTerm" ~: do let s `gives` r = parseAmountQueryTerm s `is` r "<0" `gives` (Lt,0) -- special case for convenience, since AbsLt 0 would be always false ">0" `gives` (Gt,0) -- special case for convenience and consistency with above ">10000.10" `gives` (AbsGt,10000.1) "=0.23" `gives` (AbsEq,0.23) "0.23" `gives` (AbsEq,0.23) "<=+0.23" `gives` (LtEq,0.23) "-0.23" `gives` (Eq,(-0.23)) ] parseTag :: T.Text -> (Regexp, Maybe Regexp) parseTag s | "=" `T.isInfixOf` s = (T.unpack n, Just $ tail $ T.unpack v) | otherwise = (T.unpack s, Nothing) where (n,v) = T.break (=='=') s -- | Parse the value part of a "status:" query, or return an error. parseStatus :: T.Text -> Either String ClearedStatus parseStatus s | s `elem` ["*","1"] = Right Cleared | s `elem` ["!"] = Right Pending | s `elem` ["","0"] = Right Uncleared | otherwise = Left $ "could not parse "++show s++" as a status (should be *, ! or empty)" -- | Parse the boolean value part of a "status:" query. "1" means true, -- anything else will be parsed as false without error. 
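-- (In this module it is applied to the value part of the "real:" and
-- "empty:" prefixes; "status:" values are handled by parseStatus above.)
-- For example: parseBool "1" is True, while parseBool "", parseBool "0" and
-- parseBool "yes" are all False.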
parseBool :: T.Text -> Bool parseBool s = s `elem` truestrings truestrings :: [T.Text] truestrings = ["1"] simplifyQuery :: Query -> Query simplifyQuery q = let q' = simplify q in if q' == q then q else simplifyQuery q' where simplify (And []) = Any simplify (And [q]) = simplify q simplify (And qs) | same qs = simplify $ head qs | any (==None) qs = None | all queryIsDate qs = Date $ spansIntersect $ mapMaybe queryTermDateSpan qs | otherwise = And $ concat $ [map simplify dateqs, map simplify otherqs] where (dateqs, otherqs) = partition queryIsDate $ filter (/=Any) qs simplify (Or []) = Any simplify (Or [q]) = simplifyQuery q simplify (Or qs) | same qs = simplify $ head qs | any (==Any) qs = Any -- all queryIsDate qs = Date $ spansUnion $ mapMaybe queryTermDateSpan qs ? | otherwise = Or $ map simplify $ filter (/=None) qs simplify (Date (DateSpan Nothing Nothing)) = Any simplify (Date2 (DateSpan Nothing Nothing)) = Any simplify q = q tests_simplifyQuery = [ "simplifyQuery" ~: do let q `gives` r = assertEqual "" r (simplifyQuery q) Or [Acct "a"] `gives` Acct "a" Or [Any,None] `gives` Any And [Any,None] `gives` None And [Any,Any] `gives` Any And [Acct "b",Any] `gives` Acct "b" And [Any,And [Date (DateSpan Nothing Nothing)]] `gives` Any And [Date (DateSpan Nothing (Just $ parsedate "2013-01-01")), Date (DateSpan (Just $ parsedate "2012-01-01") Nothing)] `gives` Date (DateSpan (Just $ parsedate "2012-01-01") (Just $ parsedate "2013-01-01")) And [Or [],Or [Desc "b b"]] `gives` Desc "b b" ] same [] = True same (a:as) = all (a==) as -- | Remove query terms (or whole sub-expressions) not matching the given -- predicate from this query. XXX Semantics not completely clear. filterQuery :: (Query -> Bool) -> Query -> Query filterQuery p = simplifyQuery . filterQuery' p filterQuery' :: (Query -> Bool) -> Query -> Query filterQuery' p (And qs) = And $ map (filterQuery p) qs filterQuery' p (Or qs) = Or $ map (filterQuery p) qs -- filterQuery' p (Not q) = Not $ filterQuery p q filterQuery' p q = if p q then q else Any tests_filterQuery = [ "filterQuery" ~: do let (q,p) `gives` r = assertEqual "" r (filterQuery p q) (Any, queryIsDepth) `gives` Any (Depth 1, queryIsDepth) `gives` Depth 1 (And [And [Status Cleared,Depth 1]], not . queryIsDepth) `gives` Status Cleared -- (And [Date nulldatespan, Not (Or [Any, Depth 1])], queryIsDepth) `gives` And [Not (Or [Depth 1])] ] -- * accessors -- | Does this query match everything ? queryIsNull :: Query -> Bool queryIsNull Any = True queryIsNull (And []) = True queryIsNull (Not (Or [])) = True queryIsNull _ = False queryIsDepth :: Query -> Bool queryIsDepth (Depth _) = True queryIsDepth _ = False queryIsDate :: Query -> Bool queryIsDate (Date _) = True queryIsDate _ = False queryIsDate2 :: Query -> Bool queryIsDate2 (Date2 _) = True queryIsDate2 _ = False queryIsDateOrDate2 :: Query -> Bool queryIsDateOrDate2 (Date _) = True queryIsDateOrDate2 (Date2 _) = True queryIsDateOrDate2 _ = False queryIsDesc :: Query -> Bool queryIsDesc (Desc _) = True queryIsDesc _ = False queryIsAcct :: Query -> Bool queryIsAcct (Acct _) = True queryIsAcct _ = False queryIsSym :: Query -> Bool queryIsSym (Sym _) = True queryIsSym _ = False queryIsReal :: Query -> Bool queryIsReal (Real _) = True queryIsReal _ = False queryIsStatus :: Query -> Bool queryIsStatus (Status _) = True queryIsStatus _ = False queryIsEmpty :: Query -> Bool queryIsEmpty (Empty _) = True queryIsEmpty _ = False -- | Does this query specify a start date and nothing else (that would -- filter postings prior to the date) ? 
-- When the flag is true, look for a starting secondary date instead. queryIsStartDateOnly :: Bool -> Query -> Bool queryIsStartDateOnly _ Any = False queryIsStartDateOnly _ None = False queryIsStartDateOnly secondary (Or ms) = and $ map (queryIsStartDateOnly secondary) ms queryIsStartDateOnly secondary (And ms) = and $ map (queryIsStartDateOnly secondary) ms queryIsStartDateOnly False (Date (DateSpan (Just _) _)) = True queryIsStartDateOnly True (Date2 (DateSpan (Just _) _)) = True queryIsStartDateOnly _ _ = False -- | What start date (or secondary date) does this query specify, if any ? -- For OR expressions, use the earliest of the dates. NOT is ignored. queryStartDate :: Bool -> Query -> Maybe Day queryStartDate secondary (Or ms) = earliestMaybeDate $ map (queryStartDate secondary) ms queryStartDate secondary (And ms) = latestMaybeDate $ map (queryStartDate secondary) ms queryStartDate False (Date (DateSpan (Just d) _)) = Just d queryStartDate True (Date2 (DateSpan (Just d) _)) = Just d queryStartDate _ _ = Nothing -- | What end date (or secondary date) does this query specify, if any ? -- For OR expressions, use the latest of the dates. NOT is ignored. queryEndDate :: Bool -> Query -> Maybe Day queryEndDate secondary (Or ms) = latestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate secondary (And ms) = earliestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate False (Date (DateSpan _ (Just d))) = Just d queryEndDate True (Date2 (DateSpan _ (Just d))) = Just d queryEndDate _ _ = Nothing queryTermDateSpan (Date span) = Just span queryTermDateSpan _ = Nothing -- | What date span (or secondary date span) does this query specify ? -- For OR expressions, use the widest possible span. NOT is ignored. queryDateSpan :: Bool -> Query -> DateSpan queryDateSpan secondary q = spansUnion $ queryDateSpans secondary q -- | Extract all date (or secondary date) spans specified in this query. -- NOT is ignored. queryDateSpans :: Bool -> Query -> [DateSpan] queryDateSpans secondary (Or qs) = concatMap (queryDateSpans secondary) qs queryDateSpans secondary (And qs) = concatMap (queryDateSpans secondary) qs queryDateSpans False (Date span) = [span] queryDateSpans True (Date2 span) = [span] queryDateSpans _ _ = [] -- | What date span (or secondary date span) does this query specify ? -- For OR expressions, use the widest possible span. NOT is ignored. queryDateSpan' :: Query -> DateSpan queryDateSpan' q = spansUnion $ queryDateSpans' q -- | Extract all date (or secondary date) spans specified in this query. -- NOT is ignored. queryDateSpans' :: Query -> [DateSpan] queryDateSpans' (Or qs) = concatMap queryDateSpans' qs queryDateSpans' (And qs) = concatMap queryDateSpans' qs queryDateSpans' (Date span) = [span] queryDateSpans' (Date2 span) = [span] queryDateSpans' _ = [] -- | What is the earliest of these dates, where Nothing is latest ? earliestMaybeDate :: [Maybe Day] -> Maybe Day earliestMaybeDate mds = head $ sortBy compareMaybeDates mds ++ [Nothing] -- | What is the latest of these dates, where Nothing is earliest ? latestMaybeDate :: [Maybe Day] -> Maybe Day latestMaybeDate = headDef Nothing . sortBy (flip compareMaybeDates) -- | What is the earliest of these dates, ignoring Nothings ? earliestMaybeDate' :: [Maybe Day] -> Maybe Day earliestMaybeDate' = headDef Nothing . sortBy compareMaybeDates . filter isJust -- | What is the latest of these dates, ignoring Nothings ? latestMaybeDate' :: [Maybe Day] -> Maybe Day latestMaybeDate' = headDef Nothing . sortBy (flip compareMaybeDates) . 
filter isJust -- | Compare two maybe dates, Nothing is earliest. compareMaybeDates :: Maybe Day -> Maybe Day -> Ordering compareMaybeDates Nothing Nothing = EQ compareMaybeDates Nothing (Just _) = LT compareMaybeDates (Just _) Nothing = GT compareMaybeDates (Just a) (Just b) = compare a b -- | The depth limit this query specifies, or a large number if none. queryDepth :: Query -> Int queryDepth q = case queryDepth' q of [] -> 99999 ds -> minimum ds where queryDepth' (Depth d) = [d] queryDepth' (Or qs) = concatMap queryDepth' qs queryDepth' (And qs) = concatMap queryDepth' qs queryDepth' _ = [] -- | The account we are currently focussed on, if any, and whether subaccounts are included. -- Just looks at the first query option. inAccount :: [QueryOpt] -> Maybe (AccountName,Bool) inAccount [] = Nothing inAccount (QueryOptInAcctOnly a:_) = Just (a,False) inAccount (QueryOptInAcct a:_) = Just (a,True) -- | A query for the account(s) we are currently focussed on, if any. -- Just looks at the first query option. inAccountQuery :: [QueryOpt] -> Maybe Query inAccountQuery [] = Nothing inAccountQuery (QueryOptInAcctOnly a : _) = Just $ Acct $ accountNameToAccountOnlyRegex a inAccountQuery (QueryOptInAcct a : _) = Just $ Acct $ accountNameToAccountRegex a -- -- | Convert a query to its inverse. -- negateQuery :: Query -> Query -- negateQuery = Not -- matching -- | Does the match expression match this account ? -- A matching in: clause is also considered a match. matchesAccount :: Query -> AccountName -> Bool matchesAccount (None) _ = False matchesAccount (Not m) a = not $ matchesAccount m a matchesAccount (Or ms) a = any (`matchesAccount` a) ms matchesAccount (And ms) a = all (`matchesAccount` a) ms matchesAccount (Acct r) a = regexMatchesCI r (T.unpack a) -- XXX pack matchesAccount (Depth d) a = accountNameLevel a <= d matchesAccount (Tag _ _) _ = False matchesAccount _ _ = True tests_matchesAccount = [ "matchesAccount" ~: do assertBool "positive acct match" $ matchesAccount (Acct "b:c") "a:bb:c:d" -- assertBool "acct should match at beginning" $ not $ matchesAccount (Acct True "a:b") "c:a:b" let q `matches` a = assertBool "" $ q `matchesAccount` a Depth 2 `matches` "a:b" assertBool "" $ Depth 2 `matchesAccount` "a" assertBool "" $ Depth 2 `matchesAccount` "a:b" assertBool "" $ not $ Depth 2 `matchesAccount` "a:b:c" assertBool "" $ Date nulldatespan `matchesAccount` "a" assertBool "" $ Date2 nulldatespan `matchesAccount` "a" assertBool "" $ not $ (Tag "a" Nothing) `matchesAccount` "a" ] matchesMixedAmount :: Query -> MixedAmount -> Bool matchesMixedAmount q (Mixed []) = q `matchesAmount` nullamt matchesMixedAmount q (Mixed as) = any (q `matchesAmount`) as -- | Does the match expression match this (simple) amount ? matchesAmount :: Query -> Amount -> Bool matchesAmount (Not q) a = not $ q `matchesAmount` a matchesAmount (Any) _ = True matchesAmount (None) _ = False matchesAmount (Or qs) a = any (`matchesAmount` a) qs matchesAmount (And qs) a = all (`matchesAmount` a) qs -- matchesAmount (Amt ord n) a = compareAmount ord n a matchesAmount (Sym r) a = regexMatchesCI ("^" ++ r ++ "$") $ T.unpack $ acommodity a -- matchesAmount _ _ = True -- | Is this simple (single-amount) mixed amount's quantity less than, greater than, equal to, or unsignedly equal to this number ? -- For multi-amount (multiple commodities, or just unsimplified) mixed amounts this is always true. -- | Is this amount's quantity less than, greater than, equal to, or unsignedly equal to this number ? 
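-- For example (an informal sketch of the signed vs. absolute cases below):
-- the query "amt:>10" parses to AbsGt 10, so an amount with quantity -25
-- matches (abs (-25) > abs 10), while "amt:>+10" parses to Gt 10 and the
-- same amount does not match (-25 > 10 is False).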
compareAmount :: OrdPlus -> Quantity -> Amount -> Bool compareAmount ord q Amount{aquantity=aq} = case ord of Lt -> aq < q LtEq -> aq <= q Gt -> aq > q GtEq -> aq >= q Eq -> aq == q AbsLt -> abs aq < abs q AbsLtEq -> abs aq <= abs q AbsGt -> abs aq > abs q AbsGtEq -> abs aq >= abs q AbsEq -> abs aq == abs q -- | Does the match expression match this posting ? -- -- Note that for account match we try both original and effective account matchesPosting :: Query -> Posting -> Bool matchesPosting (Not q) p = not $ q `matchesPosting` p matchesPosting (Any) _ = True matchesPosting (None) _ = False matchesPosting (Or qs) p = any (`matchesPosting` p) qs matchesPosting (And qs) p = all (`matchesPosting` p) qs matchesPosting (Code r) p = regexMatchesCI r $ maybe "" (T.unpack . tcode) $ ptransaction p matchesPosting (Desc r) p = regexMatchesCI r $ maybe "" (T.unpack . tdescription) $ ptransaction p matchesPosting (Acct r) p = matchesPosting p || matchesPosting (originalPosting p) where matchesPosting p = regexMatchesCI r $ T.unpack $ paccount p -- XXX pack matchesPosting (Date span) p = span `spanContainsDate` postingDate p matchesPosting (Date2 span) p = span `spanContainsDate` postingDate2 p matchesPosting (Status Uncleared) p = postingStatus p /= Cleared matchesPosting (Status s) p = postingStatus p == s matchesPosting (Real v) p = v == isReal p matchesPosting q@(Depth _) Posting{paccount=a} = q `matchesAccount` a matchesPosting q@(Amt _ _) Posting{pamount=amt} = q `matchesMixedAmount` amt -- matchesPosting q@(Amt _ _) Posting{pamount=amt} = q `matchesMixedAmount` amt -- matchesPosting (Empty v) Posting{pamount=a} = v == isZeroMixedAmount a -- matchesPosting (Empty False) Posting{pamount=a} = True -- matchesPosting (Empty True) Posting{pamount=a} = isZeroMixedAmount a matchesPosting (Empty _) _ = True matchesPosting (Sym r) Posting{pamount=Mixed as} = any (regexMatchesCI $ "^" ++ r ++ "$") $ map (T.unpack . acommodity) as matchesPosting (Tag n v) p = not $ null $ matchedTags n v $ postingAllTags p -- matchesPosting _ _ = False tests_matchesPosting = [ "matchesPosting" ~: do -- matching posting status.. 
assertBool "positive match on cleared posting status" $ (Status Cleared) `matchesPosting` nullposting{pstatus=Cleared} assertBool "negative match on cleared posting status" $ not $ (Not $ Status Cleared) `matchesPosting` nullposting{pstatus=Cleared} assertBool "positive match on unclered posting status" $ (Status Uncleared) `matchesPosting` nullposting{pstatus=Uncleared} assertBool "negative match on unclered posting status" $ not $ (Not $ Status Uncleared) `matchesPosting` nullposting{pstatus=Uncleared} assertBool "positive match on true posting status acquired from transaction" $ (Status Cleared) `matchesPosting` nullposting{pstatus=Uncleared,ptransaction=Just nulltransaction{tstatus=Cleared}} assertBool "real:1 on real posting" $ (Real True) `matchesPosting` nullposting{ptype=RegularPosting} assertBool "real:1 on virtual posting fails" $ not $ (Real True) `matchesPosting` nullposting{ptype=VirtualPosting} assertBool "real:1 on balanced virtual posting fails" $ not $ (Real True) `matchesPosting` nullposting{ptype=BalancedVirtualPosting} assertBool "a" $ (Acct "'b") `matchesPosting` nullposting{paccount="'b"} assertBool "b" $ not $ (Tag "a" (Just "r$")) `matchesPosting` nullposting assertBool "c" $ (Tag "foo" Nothing) `matchesPosting` nullposting{ptags=[("foo","")]} assertBool "d" $ (Tag "foo" Nothing) `matchesPosting` nullposting{ptags=[("foo","baz")]} assertBool "e" $ (Tag "foo" (Just "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "f" $ not $ (Tag "foo" (Just "a$")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "g" $ not $ (Tag " foo " (Just "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "h" $ not $ (Tag "foo foo" (Just " ar ba ")) `matchesPosting` nullposting{ptags=[("foo foo","bar bar")]} -- a tag match on a posting also sees inherited tags assertBool "i" $ (Tag "txntag" Nothing) `matchesPosting` nullposting{ptransaction=Just nulltransaction{ttags=[("txntag","")]}} assertBool "j" $ not $ (Sym "$") `matchesPosting` nullposting{pamount=Mixed [usd 1]} -- becomes "^$$", ie testing for null symbol assertBool "k" $ (Sym "\\$") `matchesPosting` nullposting{pamount=Mixed [usd 1]} -- have to quote $ for regexpr assertBool "l" $ (Sym "shekels") `matchesPosting` nullposting{pamount=Mixed [amount{acommodity="shekels"}]} assertBool "m" $ not $ (Sym "shek") `matchesPosting` nullposting{pamount=Mixed [amount{acommodity="shekels"}]} ] -- | Does the match expression match this transaction ? 
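-- For example (mirroring the tests below):
--
--   Desc "x x" `matchesTransaction` nulltransaction{tdescription="x x"}  -- True
--   Desc "x x" `matchesTransaction` nulltransaction{tdescription="x"}    -- False
--
-- An Acct (or Amt, Sym, Depth) query matches a transaction if any of its
-- postings match.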
matchesTransaction :: Query -> Transaction -> Bool matchesTransaction (Not q) t = not $ q `matchesTransaction` t matchesTransaction (Any) _ = True matchesTransaction (None) _ = False matchesTransaction (Or qs) t = any (`matchesTransaction` t) qs matchesTransaction (And qs) t = all (`matchesTransaction` t) qs matchesTransaction (Code r) t = regexMatchesCI r $ T.unpack $ tcode t matchesTransaction (Desc r) t = regexMatchesCI r $ T.unpack $ tdescription t matchesTransaction q@(Acct _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Date span) t = spanContainsDate span $ tdate t matchesTransaction (Date2 span) t = spanContainsDate span $ transactionDate2 t matchesTransaction (Status Uncleared) t = tstatus t /= Cleared matchesTransaction (Status s) t = tstatus t == s matchesTransaction (Real v) t = v == hasRealPostings t matchesTransaction q@(Amt _ _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Empty _) _ = True matchesTransaction (Depth d) t = any (Depth d `matchesPosting`) $ tpostings t matchesTransaction q@(Sym _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Tag n v) t = not $ null $ matchedTags n v $ transactionAllTags t -- matchesTransaction _ _ = False tests_matchesTransaction = [ "matchesTransaction" ~: do let q `matches` t = assertBool "" $ q `matchesTransaction` t Any `matches` nulltransaction assertBool "" $ not $ (Desc "x x") `matchesTransaction` nulltransaction{tdescription="x"} assertBool "" $ (Desc "x x") `matchesTransaction` nulltransaction{tdescription="x x"} -- see posting for more tag tests assertBool "" $ (Tag "foo" (Just "a")) `matchesTransaction` nulltransaction{ttags=[("foo","bar")]} -- a tag match on a transaction also matches posting tags assertBool "" $ (Tag "postingtag" Nothing) `matchesTransaction` nulltransaction{tpostings=[nullposting{ptags=[("postingtag","")]}]} ] -- | Filter a list of tags by matching against their names and -- optionally also their values. matchedTags :: Regexp -> Maybe Regexp -> [Tag] -> [Tag] matchedTags namepat valuepat tags = filter (match namepat valuepat) tags where match npat Nothing (n,_) = regexMatchesCI npat (T.unpack n) -- XXX match npat (Just vpat) (n,v) = regexMatchesCI npat (T.unpack n) && regexMatchesCI vpat (T.unpack v) -- tests tests_Hledger_Query :: Test tests_Hledger_Query = TestList $ tests_simplifyQuery ++ tests_words'' ++ tests_filterQuery ++ tests_parseQueryTerm ++ tests_parseAmountQueryTerm ++ tests_parseQuery ++ tests_matchesAccount ++ tests_matchesPosting ++ tests_matchesTransaction hledger-lib-1.2/Hledger/Read.hs0000644000000000000000000002615213067102102014477 0ustar0000000000000000{-| This is the entry point to hledger's reading system, which can read Journals from various data formats. Use this module if you want to parse journal data or read journal files. Generally it should not be necessary to import modules below this one. 
-} {-# LANGUAGE ScopedTypeVariables, OverloadedStrings #-} module Hledger.Read ( -- * Journal files PrefixedFilePath, defaultJournal, defaultJournalPath, readJournalFiles, readJournalFile, requireJournalFileExists, ensureJournalFileExists, splitReaderPrefix, -- * Journal parsing readJournal, readJournal', -- * Re-exported JournalReader.accountaliasp, JournalReader.postingp, module Hledger.Read.Common, -- * Tests samplejournal, tests_Hledger_Read, ) where import Control.Applicative ((<|>)) import Control.Arrow (right) import qualified Control.Exception as C import Control.Monad.Except import Data.List import Data.Maybe import Data.Text (Text) import qualified Data.Text as T import Safe import System.Directory (doesFileExist, getHomeDirectory) import System.Environment (getEnv) import System.Exit (exitFailure) import System.FilePath ((), takeExtension) import System.IO (stderr) import Test.HUnit import Text.Printf import Hledger.Data.Dates (getCurrentDay) import Hledger.Data.Types import Hledger.Read.Common import qualified Hledger.Read.JournalReader as JournalReader -- import qualified Hledger.Read.LedgerReader as LedgerReader import qualified Hledger.Read.TimedotReader as TimedotReader import qualified Hledger.Read.TimeclockReader as TimeclockReader import qualified Hledger.Read.CsvReader as CsvReader import Hledger.Utils import Prelude hiding (getContents, writeFile) import Hledger.Utils.UTF8IOCompat (writeFile) journalEnvVar = "LEDGER_FILE" journalEnvVar2 = "LEDGER" journalDefaultFilename = ".hledger.journal" -- The available journal readers, each one handling a particular data format. readers :: [Reader] readers = [ JournalReader.reader ,TimeclockReader.reader ,TimedotReader.reader ,CsvReader.reader -- ,LedgerReader.reader ] readerNames :: [String] readerNames = map rFormat readers -- | A file path optionally prefixed by a reader name and colon -- (journal:, csv:, timedot:, etc.). type PrefixedFilePath = FilePath -- | Read the default journal file specified by the environment, or raise an error. defaultJournal :: IO Journal defaultJournal = defaultJournalPath >>= readJournalFile Nothing Nothing True >>= either error' return -- | Get the default journal file path specified by the environment. -- Like ledger, we look first for the LEDGER_FILE environment -- variable, and if that does not exist, for the legacy LEDGER -- environment variable. If neither is set, or the value is blank, -- return the hard-coded default, which is @.hledger.journal@ in the -- users's home directory (or in the current directory, if we cannot -- determine a home directory). defaultJournalPath :: IO String defaultJournalPath = do s <- envJournalPath if null s then defaultJournalPath else return s where envJournalPath = getEnv journalEnvVar `C.catch` (\(_::C.IOException) -> getEnv journalEnvVar2 `C.catch` (\(_::C.IOException) -> return "")) defaultJournalPath = do home <- getHomeDirectory `C.catch` (\(_::C.IOException) -> return "") return $ home journalDefaultFilename -- | @readJournalFiles mformat mrulesfile assrt prefixedfiles@ -- -- Read a Journal from each specified file path and combine them into one. -- Or, return the first error message. -- -- Combining Journals means concatenating them, basically. -- The parse state resets at the start of each file, which means that -- directives & aliases do not cross file boundaries. -- (The final parse state saved in the Journal does span all files, however.) 
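-- For example (an illustrative call; the file names are hypothetical):
--
--   readJournalFiles Nothing Nothing True ["2016.journal", "csv:bank.csv"]
--
-- reads both files (the second forced to the csv reader by its prefix) and
-- returns either the combined Journal or the first error message.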
-- -- As with readJournalFile, -- file paths can optionally have a READER: prefix, -- and the @mformat@, @mrulesfile, and @assrt@ arguments are supported -- (and these are applied to all files). -- readJournalFiles :: Maybe StorageFormat -> Maybe FilePath -> Bool -> [PrefixedFilePath] -> IO (Either String Journal) readJournalFiles mformat mrulesfile assrt prefixedfiles = do (right mconcat1 . sequence) <$> mapM (readJournalFile mformat mrulesfile assrt) prefixedfiles where mconcat1 :: Monoid t => [t] -> t mconcat1 [] = mempty mconcat1 x = foldr1 mappend x -- | @readJournalFile mformat mrulesfile assrt prefixedfile@ -- -- Read a Journal from this file, or from stdin if the file path is -, -- or return an error message. The file path can have a READER: prefix. -- -- The reader (data format) is chosen based on (in priority order): -- the @mformat@ argument; -- the file path's READER: prefix, if any; -- a recognised file name extension (in readJournal); -- if none of these identify a known reader, all built-in readers are tried in turn. -- -- A CSV conversion rules file (@mrulesfiles@) can be specified to help convert CSV data. -- -- Optionally, any balance assertions in the journal can be checked (@assrt@). -- readJournalFile :: Maybe StorageFormat -> Maybe FilePath -> Bool -> PrefixedFilePath -> IO (Either String Journal) readJournalFile mformat mrulesfile assrt prefixedfile = do let (mprefixformat, f) = splitReaderPrefix prefixedfile mfmt = mformat <|> mprefixformat requireJournalFileExists f readFileOrStdinAnyLineEnding f >>= readJournal mfmt mrulesfile assrt (Just f) -- | If a filepath is prefixed by one of the reader names and a colon, -- split that off. Eg "csv:-" -> (Just "csv", "-"). splitReaderPrefix :: PrefixedFilePath -> (Maybe String, FilePath) splitReaderPrefix f = headDef (Nothing, f) [(Just r, drop (length r + 1) f) | r <- readerNames, (r++":") `isPrefixOf` f] -- | If the specified journal file does not exist (and is not "-"), -- give a helpful error and quit. requireJournalFileExists :: FilePath -> IO () requireJournalFileExists "-" = return () requireJournalFileExists f = do exists <- doesFileExist f when (not exists) $ do -- XXX might not be a journal file hPrintf stderr "The hledger journal file \"%s\" was not found.\n" f hPrintf stderr "Please create it first, eg with \"hledger add\" or a text editor.\n" hPrintf stderr "Or, specify an existing journal file with -f or LEDGER_FILE.\n" exitFailure -- | Ensure there is a journal file at the given path, creating an empty one if needed. ensureJournalFileExists :: FilePath -> IO () ensureJournalFileExists f = do exists <- doesFileExist f when (not exists) $ do hPrintf stderr "Creating hledger journal file %s.\n" f -- note Hledger.Utils.UTF8.* do no line ending conversion on windows, -- we currently require unix line endings on all platforms. newJournalContent >>= writeFile f -- | Give the content for a new auto-created journal file. newJournalContent :: IO String newJournalContent = do d <- getCurrentDay return $ printf "; journal created %s by hledger\n" (show d) -- | Read a Journal from the given text trying all readers in turn, or throw an error. readJournal' :: Text -> IO Journal readJournal' t = readJournal Nothing Nothing True Nothing t >>= either error' return tests_readJournal' = [ "readJournal' parses sample journal" ~: do _ <- samplejournal assertBool "" True ] -- | @readJournal mformat mrulesfile assrt mfile txt@ -- -- Read a Journal from some text, or return an error message. 
-- -- The reader (data format) is chosen based on (in priority order): -- the @mformat@ argument; -- a recognised file name extension in @mfile@ (if provided). -- If none of these identify a known reader, all built-in readers are tried in turn -- (returning the first one's error message if none of them succeed). -- -- A CSV conversion rules file (@mrulesfiles@) can be specified to help convert CSV data. -- -- Optionally, any balance assertions in the journal can be checked (@assrt@). -- readJournal :: Maybe StorageFormat -> Maybe FilePath -> Bool -> Maybe FilePath -> Text -> IO (Either String Journal) readJournal mformat mrulesfile assrt mfile txt = let stablereaders = filter (not.rExperimental) readers rs = maybe stablereaders (:[]) $ findReader mformat mfile in tryReaders rs mrulesfile assrt mfile txt -- | @findReader mformat mpath@ -- -- Find the reader named by @mformat@, if provided. -- Or, if a file path is provided, find the first reader that handles -- its file extension, if any. findReader :: Maybe StorageFormat -> Maybe FilePath -> Maybe Reader findReader Nothing Nothing = Nothing findReader (Just fmt) _ = headMay [r | r <- readers, rFormat r == fmt] findReader Nothing (Just path) = case prefix of Just fmt -> headMay [r | r <- readers, rFormat r == fmt] Nothing -> headMay [r | r <- readers, ext `elem` rExtensions r] where (prefix,path') = splitReaderPrefix path ext = drop 1 $ takeExtension path' -- | @tryReaders readers mrulesfile assrt path t@ -- -- Try to parse the given text to a Journal using each reader in turn, -- returning the first success, or if all of them fail, the first error message. tryReaders :: [Reader] -> Maybe FilePath -> Bool -> Maybe FilePath -> Text -> IO (Either String Journal) tryReaders readers mrulesfile assrt path t = firstSuccessOrFirstError [] readers where firstSuccessOrFirstError :: [String] -> [Reader] -> IO (Either String Journal) firstSuccessOrFirstError [] [] = return $ Left "no readers found" firstSuccessOrFirstError errs (r:rs) = do dbg1IO "trying reader" (rFormat r) result <- (runExceptT . (rParser r) mrulesfile assrt path') t dbg1IO "reader result" $ either id show result case result of Right j -> return $ Right j -- success! Left e -> firstSuccessOrFirstError (errs++[e]) rs -- keep trying firstSuccessOrFirstError (e:_) [] = return $ Left e -- none left, return first error path' = fromMaybe "(string)" path -- tests samplejournal = readJournal' $ T.unlines ["2008/01/01 income" ," assets:bank:checking $1" ," income:salary" ,"" ,"comment" ,"multi line comment here" ,"for testing purposes" ,"end comment" ,"" ,"2008/06/01 gift" ," assets:bank:checking $1" ," income:gifts" ,"" ,"2008/06/02 save" ," assets:bank:saving $1" ," assets:bank:checking" ,"" ,"2008/06/03 * eat & shop" ," expenses:food $1" ," expenses:supplies $1" ," assets:cash" ,"" ,"2008/12/31 * pay off" ," liabilities:debts $1" ," assets:bank:checking" ] tests_Hledger_Read = TestList $ tests_readJournal' ++ [ JournalReader.tests_Hledger_Read_JournalReader, -- LedgerReader.tests_Hledger_Read_LedgerReader, TimeclockReader.tests_Hledger_Read_TimeclockReader, TimedotReader.tests_Hledger_Read_TimedotReader, CsvReader.tests_Hledger_Read_CsvReader, "journal" ~: do r <- runExceptT $ parseWithState mempty JournalReader.journalp "" assertBool "journalp should parse an empty file" (isRight $ r) jE <- readJournal Nothing Nothing True Nothing "" -- don't know how to get it from journal either error' (assertBool "journalp parsing an empty file should give an empty journal" . null . 
jtxns) jE ] hledger-lib-1.2/Hledger/Read/Common.hs0000644000000000000000000010016513066173044015740 0ustar0000000000000000--- * doc -- Lines beginning "--- *" are collapsible orgstruct nodes. Emacs users, -- (add-hook 'haskell-mode-hook -- (lambda () (set-variable 'orgstruct-heading-prefix-regexp "--- " t)) -- 'orgstruct-mode) -- and press TAB on nodes to expand/collapse. {-| Some common parsers and helpers used by several readers. Some of these might belong in Hledger.Read.JournalReader or Hledger.Read. -} --- * module {-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-} module Hledger.Read.Common where --- * imports import Prelude () import Prelude.Compat hiding (readFile) import Control.Monad.Compat import Control.Monad.Except (ExceptT(..), runExceptT, throwError) --, catchError) import Control.Monad.State.Strict import Data.Char (isNumber) import Data.Functor.Identity import Data.List.Compat import Data.List.NonEmpty (NonEmpty(..)) import Data.List.Split (wordsBy) import Data.Maybe import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import Safe import System.Time (getClockTime) import Text.Megaparsec hiding (parse,State) import Text.Megaparsec.Text import Hledger.Data import Hledger.Utils -- $setup --- * parsing utils -- | Run a string parser with no state in the identity monad. runTextParser, rtp :: TextParser Identity a -> Text -> Either (ParseError Char Dec) a runTextParser p t = runParser p "" t rtp = runTextParser -- | Run a journal parser with a null journal-parsing state. runJournalParser, rjp :: Monad m => TextParser m a -> Text -> m (Either (ParseError Char Dec) a) runJournalParser p t = runParserT p "" t rjp = runJournalParser -- | Run an error-raising journal parser with a null journal-parsing state. runErroringJournalParser, rejp :: Monad m => ErroringJournalParser m a -> Text -> m (Either String a) runErroringJournalParser p t = runExceptT $ runJournalParser (evalStateT p mempty) t >>= either (throwError . parseErrorPretty) return rejp = runErroringJournalParser genericSourcePos :: SourcePos -> GenericSourcePos genericSourcePos p = GenericSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p) (fromIntegral . unPos $ sourceColumn p) journalSourcePos :: SourcePos -> SourcePos -> GenericSourcePos journalSourcePos p p' = JournalSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p, fromIntegral $ line') where line' | (unPos $ sourceColumn p') == 1 = unPos (sourceLine p') - 1 | otherwise = unPos $ sourceLine p' -- might be at end of file withat last new-line -- | Given a megaparsec ParsedJournal parser, balance assertion flag, file -- path and file content: parse and post-process a Journal, or give an error. 
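-- A rough sketch of how a reader could wire this in (the format name,
-- extension and parser below are illustrative placeholders, not the real
-- ones; see the concrete Hledger.Read.*Reader modules):
--
--   reader = Reader
--     { rFormat       = "someformat"
--     , rExtensions   = ["someformat"]
--     , rParser       = \_mrulesfile -> parseAndFinaliseJournal someformatp
--     , rExperimental = False
--     }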
parseAndFinaliseJournal :: ErroringJournalParser IO ParsedJournal -> Bool -> FilePath -> Text -> ExceptT String IO Journal parseAndFinaliseJournal parser assrt f txt = do t <- liftIO getClockTime y <- liftIO getCurrentYear ep <- runParserT (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt case ep of Right pj -> case journalFinalise t f txt assrt pj of Right j -> return j Left e -> throwError e Left e -> throwError $ parseErrorPretty e parseAndFinaliseJournal' :: JournalParser ParsedJournal -> Bool -> FilePath -> Text -> ExceptT String IO Journal parseAndFinaliseJournal' parser assrt f txt = do t <- liftIO getClockTime y <- liftIO getCurrentYear let ep = runParser (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt case ep of Right pj -> case journalFinalise t f txt assrt pj of Right j -> return j Left e -> throwError e Left e -> throwError $ parseErrorPretty e setYear :: Year -> JournalStateParser m () setYear y = modify' (\j -> j{jparsedefaultyear=Just y}) getYear :: JournalStateParser m (Maybe Year) getYear = fmap jparsedefaultyear get setDefaultCommodityAndStyle :: (CommoditySymbol,AmountStyle) -> JournalStateParser m () setDefaultCommodityAndStyle cs = modify' (\j -> j{jparsedefaultcommodity=Just cs}) getDefaultCommodityAndStyle :: JournalStateParser m (Maybe (CommoditySymbol,AmountStyle)) getDefaultCommodityAndStyle = jparsedefaultcommodity `fmap` get pushAccount :: AccountName -> JournalStateParser m () pushAccount acct = modify' (\j -> j{jaccounts = acct : jaccounts j}) pushParentAccount :: AccountName -> JournalStateParser m () pushParentAccount acct = modify' (\j -> j{jparseparentaccounts = acct : jparseparentaccounts j}) popParentAccount :: JournalStateParser m () popParentAccount = do j <- get case jparseparentaccounts j of [] -> unexpected (Tokens ('E' :| "nd of apply account block with no beginning")) (_:rest) -> put j{jparseparentaccounts=rest} getParentAccount :: JournalStateParser m AccountName getParentAccount = fmap (concatAccountNames . reverse . jparseparentaccounts) get addAccountAlias :: MonadState Journal m => AccountAlias -> m () addAccountAlias a = modify' (\(j@Journal{..}) -> j{jparsealiases=a:jparsealiases}) getAccountAliases :: MonadState Journal m => m [AccountAlias] getAccountAliases = fmap jparsealiases get clearAccountAliases :: MonadState Journal m => m () clearAccountAliases = modify' (\(j@Journal{..}) -> j{jparsealiases=[]}) -- getTransactionCount :: MonadState Journal m => m Integer -- getTransactionCount = fmap jparsetransactioncount get -- -- setTransactionCount :: MonadState Journal m => Integer -> m () -- setTransactionCount i = modify' (\j -> j{jparsetransactioncount=i}) -- -- -- | Increment the transaction index by one and return the new value. -- incrementTransactionCount :: MonadState Journal m => m Integer -- incrementTransactionCount = do -- modify' (\j -> j{jparsetransactioncount=jparsetransactioncount j + 1}) -- getTransactionCount journalAddFile :: (FilePath,Text) -> Journal -> Journal journalAddFile f j@Journal{jfiles=fs} = j{jfiles=fs++[f]} -- append, unlike the other fields, even though we do a final reverse, -- to compensate for additional reversal due to including/monoid-concatting -- -- | Terminate parsing entirely, returning the given error message -- -- with the current parse position prepended. 
-- parserError :: String -> ErroringJournalParser a -- parserError s = do -- pos <- getPosition -- parserErrorAt pos s -- | Terminate parsing entirely, returning the given error message -- with the given parse position prepended. parserErrorAt :: Monad m => SourcePos -> String -> ErroringJournalParser m a parserErrorAt pos s = throwError $ sourcePosPretty pos ++ ":\n" ++ s --- * parsers --- ** transaction bits statusp :: TextParser m ClearedStatus statusp = choice' [ many spacenonewline >> char '*' >> return Cleared , many spacenonewline >> char '!' >> return Pending , return Uncleared ] "cleared status" codep :: TextParser m String codep = try (do { some spacenonewline; char '(' "codep"; anyChar `manyTill` char ')' } ) <|> return "" descriptionp :: JournalStateParser m String descriptionp = many (noneOf (";\n" :: [Char])) --- ** dates -- | Parse a date in YYYY/MM/DD format. -- Hyphen (-) and period (.) are also allowed as separators. -- The year may be omitted if a default year has been set. -- Leading zeroes may be omitted. datep :: JournalStateParser m Day datep = do -- hacky: try to ensure precise errors for invalid dates -- XXX reported error position is not too good -- pos <- genericSourcePos <$> getPosition datestr <- do c <- digitChar cs <- lift $ many $ choice' [digitChar, datesepchar] return $ c:cs let sepchars = nub $ sort $ filter (`elem` datesepchars) datestr when (length sepchars /= 1) $ fail $ "bad date, different separators used: " ++ datestr let dateparts = wordsBy (`elem` datesepchars) datestr currentyear <- getYear [y,m,d] <- case (dateparts,currentyear) of ([m,d],Just y) -> return [show y,m,d] ([_,_],Nothing) -> fail $ "partial date "++datestr++" found, but the current year is unknown" ([y,m,d],_) -> return [y,m,d] _ -> fail $ "bad date: " ++ datestr let maybedate = fromGregorianValid (read y) (read m) (read d) case maybedate of Nothing -> fail $ "bad date: " ++ datestr Just date -> return date "full or partial date" -- | Parse a date and time in YYYY/MM/DD HH:MM[:SS][+-ZZZZ] format. -- Hyphen (-) and period (.) are also allowed as date separators. -- The year may be omitted if a default year has been set. -- Seconds are optional. -- The timezone is optional and ignored (the time is always interpreted as a local time). -- Leading zeroes may be omitted (except in a timezone). datetimep :: JournalStateParser m LocalTime datetimep = do day <- datep lift $ some spacenonewline h <- some digitChar let h' = read h guard $ h' >= 0 && h' <= 23 char ':' m <- some digitChar let m' = read m guard $ m' >= 0 && m' <= 59 s <- optional $ char ':' >> some digitChar let s' = case s of Just sstr -> read sstr Nothing -> 0 guard $ s' >= 0 && s' <= 59 {- tz <- -} optional $ do plusminus <- oneOf ("-+" :: [Char]) d1 <- digitChar d2 <- digitChar d3 <- digitChar d4 <- digitChar return $ plusminus:d1:d2:d3:d4:"" -- ltz <- liftIO $ getCurrentTimeZone -- let tz' = maybe ltz (fromMaybe ltz . 
parseTime defaultTimeLocale "%z") tz -- return $ localTimeToUTC tz' $ LocalTime day $ TimeOfDay h' m' (fromIntegral s') return $ LocalTime day $ TimeOfDay h' m' (fromIntegral s') secondarydatep :: Day -> JournalStateParser m Day secondarydatep primarydate = do char '=' -- kludgy way to use primary date for default year let withDefaultYear d p = do y <- getYear let (y',_,_) = toGregorian d in setYear y' r <- p when (isJust y) $ setYear $ fromJust y -- XXX -- mapM setYear <$> y return r withDefaultYear primarydate datep -- | -- >> parsewith twoorthreepartdatestringp "2016/01/2" -- Right "2016/01/2" -- twoorthreepartdatestringp = do -- n1 <- some digitChar -- c <- datesepchar -- n2 <- some digitChar -- mn3 <- optional $ char c >> some digitChar -- return $ n1 ++ c:n2 ++ maybe "" (c:) mn3 --- ** account names -- | Parse an account name, then apply any parent account prefix and/or account aliases currently in effect. modifiedaccountnamep :: JournalStateParser m AccountName modifiedaccountnamep = do parent <- getParentAccount aliases <- getAccountAliases a <- lift accountnamep return $ accountNameApplyAliases aliases $ -- XXX accountNameApplyAliasesMemo ? doesn't seem to make a difference joinAccountNames parent a -- | Parse an account name. Account names start with a non-space, may -- have single spaces inside them, and are terminated by two or more -- spaces (or end of input). Also they have one or more components of -- at least one character, separated by the account separator char. -- (This parser will also consume one following space, if present.) accountnamep :: TextParser m AccountName accountnamep = do astr <- do c <- nonspace cs <- striptrailingspace <$> many (nonspace <|> singlespace) return $ c:cs let a = T.pack astr when (accountNameFromComponents (accountNameComponents a) /= a) (fail $ "account name seems ill-formed: "++astr) return a where singlespace = try (do {spacenonewline; do {notFollowedBy spacenonewline; return ' '}}) striptrailingspace "" = "" striptrailingspace s = if last s == ' ' then init s else s -- accountnamechar = notFollowedBy (oneOf "()[]") >> nonspace -- "account name character (non-bracket, non-parenthesis, non-whitespace)" --- ** amounts -- | Parse whitespace then an amount, with an optional left or right -- currency symbol and optional price, or return the special -- "missing" marker amount. spaceandamountormissingp :: Monad m => JournalStateParser m MixedAmount spaceandamountormissingp = try (do lift $ some spacenonewline (Mixed . (:[])) `fmap` amountp <|> return missingmixedamt ) <|> return missingmixedamt #ifdef TESTS assertParseEqual' :: (Show a, Eq a) => (Either ParseError a) -> a -> Assertion assertParseEqual' parse expected = either (assertFailure.show) (`is'` expected) parse is' :: (Eq a, Show a) => a -> a -> Assertion a `is'` e = assertEqual e a test_spaceandamountormissingp = do assertParseEqual' (parseWithState mempty spaceandamountormissingp " $47.18") (Mixed [usd 47.18]) assertParseEqual' (parseWithState mempty spaceandamountormissingp "$47.18") missingmixedamt assertParseEqual' (parseWithState mempty spaceandamountormissingp " ") missingmixedamt assertParseEqual' (parseWithState mempty spaceandamountormissingp "") missingmixedamt #endif -- | Parse a single-commodity amount, with optional symbol on the left or -- right, optional unit or total price, and optional (ignored) -- ledger-style balance assertion or fixed lot price declaration. 
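-- Illustrative amount strings accepted by this family of parsers (the first
-- and third are drawn from the tests below; the second is an assumed example
-- of comma-as-decimal-point style):
--
--   amountp' "$47.18"        -- a left-symbol amount, usd 47.18
--   amountp' "1.000,00 EUR"  -- 1000 EUR, ',' decimal mark, '.' digit groups
--   amountp' "$10 @ €0.5"    -- an amount with a unit price attached
--
-- amountp' (defined a little further below) runs amountp on a String and
-- calls error' on parse failure, so it is mainly useful in tests.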
amountp :: Monad m => JournalStateParser m Amount amountp = try leftsymbolamountp <|> try rightsymbolamountp <|> nosymbolamountp #ifdef TESTS test_amountp = do assertParseEqual' (parseWithState mempty amountp "$47.18") (usd 47.18) assertParseEqual' (parseWithState mempty amountp "$1.") (usd 1 `withPrecision` 0) -- ,"amount with unit price" ~: do assertParseEqual' (parseWithState mempty amountp "$10 @ €0.5") (usd 10 `withPrecision` 0 `at` (eur 0.5 `withPrecision` 1)) -- ,"amount with total price" ~: do assertParseEqual' (parseWithState mempty amountp "$10 @@ €5") (usd 10 `withPrecision` 0 @@ (eur 5 `withPrecision` 0)) #endif -- | Parse an amount from a string, or get an error. amountp' :: String -> Amount amountp' s = case runParser (evalStateT (amountp <* eof) mempty) "" (T.pack s) of Right amt -> amt Left err -> error' $ show err -- XXX should throwError -- | Parse a mixed amount from a string, or get an error. mamountp' :: String -> MixedAmount mamountp' = Mixed . (:[]) . amountp' signp :: TextParser m String signp = do sign <- optional $ oneOf ("+-" :: [Char]) return $ case sign of Just '-' -> "-" _ -> "" leftsymbolamountp :: Monad m => JournalStateParser m Amount leftsymbolamountp = do sign <- lift signp c <- lift commoditysymbolp sp <- lift $ many spacenonewline (q,prec,mdec,mgrps) <- lift numberp let s = amountstyle{ascommodityside=L, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} p <- priceamountp let applysign = if sign=="-" then negate else id return $ applysign $ Amount c q p s "left-symbol amount" rightsymbolamountp :: Monad m => JournalStateParser m Amount rightsymbolamountp = do (q,prec,mdec,mgrps) <- lift numberp sp <- lift $ many spacenonewline c <- lift commoditysymbolp p <- priceamountp let s = amountstyle{ascommodityside=R, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} return $ Amount c q p s "right-symbol amount" nosymbolamountp :: Monad m => JournalStateParser m Amount nosymbolamountp = do (q,prec,mdec,mgrps) <- lift numberp p <- priceamountp -- apply the most recently seen default commodity and style to this commodityless amount defcs <- getDefaultCommodityAndStyle let (c,s) = case defcs of Just (defc,defs) -> (defc, defs{asprecision=max (asprecision defs) prec}) Nothing -> ("", amountstyle{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}) return $ Amount c q p s "no-symbol amount" commoditysymbolp :: TextParser m CommoditySymbol commoditysymbolp = (quotedcommoditysymbolp <|> simplecommoditysymbolp) "commodity symbol" quotedcommoditysymbolp :: TextParser m CommoditySymbol quotedcommoditysymbolp = do char '"' s <- some $ noneOf (";\n\"" :: [Char]) char '"' return $ T.pack s simplecommoditysymbolp :: TextParser m CommoditySymbol simplecommoditysymbolp = T.pack <$> some (noneOf nonsimplecommoditychars) priceamountp :: Monad m => JournalStateParser m Price priceamountp = try (do lift (many spacenonewline) char '@' try (do char '@' lift (many spacenonewline) a <- amountp -- XXX can parse more prices ad infinitum, shouldn't return $ TotalPrice a) <|> (do lift (many spacenonewline) a <- amountp -- XXX can parse more prices ad infinitum, shouldn't return $ UnitPrice a)) <|> return NoPrice partialbalanceassertionp :: Monad m => JournalStateParser m (Maybe Amount) partialbalanceassertionp = try (do lift (many spacenonewline) char '=' lift (many spacenonewline) a <- amountp -- XXX should restrict to a simple amount return $ Just $ a) <|> return Nothing -- balanceassertion :: 
Monad m => TextParser m (Maybe MixedAmount) -- balanceassertion = -- try (do -- lift (many spacenonewline) -- string "==" -- lift (many spacenonewline) -- a <- amountp -- XXX should restrict to a simple amount -- return $ Just $ Mixed [a]) -- <|> return Nothing -- http://ledger-cli.org/3.0/doc/ledger3.html#Fixing-Lot-Prices fixedlotpricep :: Monad m => JournalStateParser m (Maybe Amount) fixedlotpricep = try (do lift (many spacenonewline) char '{' lift (many spacenonewline) char '=' lift (many spacenonewline) a <- amountp -- XXX should restrict to a simple amount lift (many spacenonewline) char '}' return $ Just a) <|> return Nothing -- | Parse a string representation of a number for its value and display -- attributes. -- -- Some international number formats are accepted, eg either period or comma -- may be used for the decimal point, and the other of these may be used for -- separating digit groups in the integer part. See -- http://en.wikipedia.org/wiki/Decimal_separator for more examples. -- -- This returns: the parsed numeric value, the precision (number of digits -- seen following the decimal point), the decimal point character used if any, -- and the digit group style if any. -- numberp :: TextParser m (Quantity, Int, Maybe Char, Maybe DigitGroupStyle) numberp = do -- a number is an optional sign followed by a sequence of digits possibly -- interspersed with periods, commas, or both -- ptrace "numberp" sign <- signp parts <- some $ choice' [some digitChar, some $ char ',', some $ char '.'] dbg8 "numberp parsed" (sign,parts) `seq` return () -- check the number is well-formed and identify the decimal point and digit -- group separator characters used, if any let (numparts, puncparts) = partition numeric parts (ok, mdecimalpoint, mseparator) = case (numparts, puncparts) of ([],_) -> (False, Nothing, Nothing) -- no digits, not ok (_,[]) -> (True, Nothing, Nothing) -- digits with no punctuation, ok (_,[[d]]) -> (True, Just d, Nothing) -- just a single punctuation of length 1, assume it's a decimal point (_,[_]) -> (False, Nothing, Nothing) -- a single punctuation of some other length, not ok (_,_:_:_) -> -- two or more punctuations let (s:ss, d) = (init puncparts, last puncparts) -- the leftmost is a separator and the rightmost may be a decimal point in if any ((/=1).length) puncparts -- adjacent punctuation chars, not ok || any (s/=) ss -- separator chars vary, not ok || head parts == s -- number begins with a separator char, not ok then (False, Nothing, Nothing) else if s == d then (True, Nothing, Just $ head s) -- just one kind of punctuation - must be separators else (True, Just $ head d, Just $ head s) -- separator(s) and a decimal point unless ok $ fail $ "number seems ill-formed: "++concat parts -- get the digit group sizes and digit group style if any let (intparts',fracparts') = span ((/= mdecimalpoint) . Just . 
head) parts (intparts, fracpart) = (filter numeric intparts', filter numeric fracparts') groupsizes = reverse $ case map length intparts of (a:b:cs) | a < b -> b:cs gs -> gs mgrps = (`DigitGroups` groupsizes) <$> mseparator -- put the parts back together without digit group separators, get the precision and parse the value let int = concat $ "":intparts frac = concat $ "":fracpart precision = length frac int' = if null int then "0" else int frac' = if null frac then "0" else frac quantity = read $ sign++int'++"."++frac' -- this read should never fail return $ dbg8 "numberp quantity,precision,mdecimalpoint,mgrps" (quantity,precision,mdecimalpoint,mgrps) "numberp" where numeric = isNumber . headDef '_' -- test_numberp = do -- let s `is` n = assertParseEqual (parseWithState mempty numberp s) n -- assertFails = assertBool . isLeft . parseWithState mempty numberp -- assertFails "" -- "0" `is` (0, 0, '.', ',', []) -- "1" `is` (1, 0, '.', ',', []) -- "1.1" `is` (1.1, 1, '.', ',', []) -- "1,000.1" `is` (1000.1, 1, '.', ',', [3]) -- "1.00.000,1" `is` (100000.1, 1, ',', '.', [3,2]) -- "1,000,000" `is` (1000000, 0, '.', ',', [3,3]) -- "1." `is` (1, 0, '.', ',', []) -- "1," `is` (1, 0, ',', '.', []) -- ".1" `is` (0.1, 1, '.', ',', []) -- ",1" `is` (0.1, 1, ',', '.', []) -- assertFails "1,000.000,1" -- assertFails "1.000,000.1" -- assertFails "1,000.000.1" -- assertFails "1,,1" -- assertFails "1..1" -- assertFails ".1," -- assertFails ",1." --- ** comments multilinecommentp :: JournalStateParser m () multilinecommentp = do string "comment" >> lift (many spacenonewline) >> newline go where go = try (eof <|> (string "end comment" >> newline >> return ())) <|> (anyLine >> go) anyLine = anyChar `manyTill` newline emptyorcommentlinep :: JournalStateParser m () emptyorcommentlinep = do lift (many spacenonewline) >> (commentp <|> (lift (many spacenonewline) >> newline >> return "")) return () -- | Parse a possibly multi-line comment following a semicolon. followingcommentp :: JournalStateParser m Text followingcommentp = -- ptrace "followingcommentp" do samelinecomment <- lift (many spacenonewline) >> (try semicoloncommentp <|> (newline >> return "")) newlinecomments <- many (try (lift (some spacenonewline) >> semicoloncommentp)) return $ T.unlines $ samelinecomment:newlinecomments -- | Parse a possibly multi-line comment following a semicolon, and -- any tags and/or posting dates within it. Posting dates can be -- expressed with "date"/"date2" tags and/or bracketed dates. The -- dates are parsed in full here so that errors are reported in the -- right position. Missing years can be inferred if a default date is -- provided. -- -- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; a:b, date:3/4, [=5/6]" -- Right ("a:b, date:3/4, [=5/6]\n",[("a","b"),("date","3/4")],Just 2000-03-04,Just 2000-05-06) -- -- Year unspecified and no default provided -> unknown year error, at correct position: -- >>> rejp (followingcommentandtagsp Nothing) " ; xxx date:3/4\n ; second line" -- Left ...1:22...partial date 3/4 found, but the current year is unknown... -- -- Date tag value contains trailing text - forgot the comma, confused: -- the syntaxes ? 
We'll accept the leading date anyway -- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; date:3/4=5/6" -- Right ("date:3/4=5/6\n",[("date","3/4=5/6")],Just 2000-03-04,Nothing) -- followingcommentandtagsp :: MonadIO m => Maybe Day -> ErroringJournalParser m (Text, [Tag], Maybe Day, Maybe Day) followingcommentandtagsp mdefdate = do -- pdbg 0 "followingcommentandtagsp" -- Parse a single or multi-line comment, starting on this line or the next one. -- Save the starting position and preserve all whitespace for the subsequent re-parsing, -- to get good error positions. startpos <- getPosition commentandwhitespace :: String <- do let semicoloncommentp' = (:) <$> char ';' <*> anyChar `manyTill` eolof sp1 <- lift (many spacenonewline) l1 <- try (lift semicoloncommentp') <|> (newline >> return "") ls <- lift . many $ try ((++) <$> some spacenonewline <*> semicoloncommentp') return $ unlines $ (sp1 ++ l1) : ls let comment = T.pack $ unlines $ map (lstrip . dropWhile (==';') . strip) $ lines commentandwhitespace -- pdbg 0 $ "commentws:"++show commentandwhitespace -- pdbg 0 $ "comment:"++show comment -- Reparse the comment for any tags. tags <- case runTextParser (setPosition startpos >> tagsp) $ T.pack commentandwhitespace of Right ts -> return ts Left e -> throwError $ parseErrorPretty e -- pdbg 0 $ "tags: "++show tags -- Reparse the comment for any posting dates. Use the transaction date for defaults, if provided. epdates <- liftIO $ rejp (setPosition startpos >> postingdatesp mdefdate) $ T.pack commentandwhitespace pdates <- case epdates of Right ds -> return ds Left e -> throwError e -- pdbg 0 $ "pdates: "++show pdates let mdate = headMay $ map snd $ filter ((=="date").fst) pdates mdate2 = headMay $ map snd $ filter ((=="date2").fst) pdates return (comment, tags, mdate, mdate2) commentp :: JournalStateParser m Text commentp = commentStartingWithp commentchars commentchars :: [Char] commentchars = "#;*" semicoloncommentp :: JournalStateParser m Text semicoloncommentp = commentStartingWithp ";" commentStartingWithp :: [Char] -> JournalStateParser m Text commentStartingWithp cs = do -- ptrace "commentStartingWith" oneOf cs lift (many spacenonewline) l <- anyChar `manyTill` (lift eolof) optional newline return $ T.pack l --- ** tags -- | Extract any tags (name:value ended by comma or newline) embedded in a string. -- -- >>> commentTags "a b:, c:c d:d, e" -- [("b",""),("c","c d:d")] -- -- >>> commentTags "a [1/1/1] [1/1] [1], [=1/1/1] [=1/1] [=1] [1/1=1/1/1] [1=1/1/1] b:c" -- [("b","c")] -- -- --[("date","1/1/1"),("date","1/1"),("date2","1/1/1"),("date2","1/1"),("date","1/1"),("date2","1/1/1"),("date","1"),("date2","1/1/1")] -- -- >>> commentTags "\na b:, \nd:e, f" -- [("b",""),("d","e")] -- commentTags :: Text -> [Tag] commentTags s = case runTextParser tagsp s of Right r -> r Left _ -> [] -- shouldn't happen -- | Parse all tags found in a string. tagsp :: Parser [Tag] tagsp = -- do -- pdbg 0 $ "tagsp" many (try (nontagp >> tagp)) -- | Parse everything up till the first tag. -- -- >>> rtp nontagp "\na b:, \nd:e, f" -- Right "\na " nontagp :: Parser String nontagp = -- do -- pdbg 0 "nontagp" -- anyChar `manyTill` (lookAhead (try (tagorbracketeddatetagsp Nothing >> return ()) <|> eof)) anyChar `manyTill` lookAhead (try (void tagp) <|> eof) -- XXX costly ? -- | Tags begin with a colon-suffixed tag name (a word beginning with -- a letter) and are followed by a tag value (any text up to a comma -- or newline, whitespace-stripped). 
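-- For orientation, a posting comment carrying tags might look like this in
-- a journal (an assumed walk-through, not a checked doctest; the names are
-- made up):
--
--     assets:bank:checking   $-10  ; payee:Acme, date:2016/3/15
--
-- From that comment text, commentTags/tagsp above should extract
-- [("payee","Acme"),("date","2016/3/15")], and postingdatesp below would
-- additionally parse the date: tag's value as a posting date.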
-- -- >>> rtp tagp "a:b b , c AuxDate: 4/2" -- Right ("a","b b") -- tagp :: Parser Tag tagp = do -- pdbg 0 "tagp" n <- tagnamep v <- tagvaluep return (n,v) -- | -- >>> rtp tagnamep "a:" -- Right "a" tagnamep :: Parser Text tagnamep = -- do -- pdbg 0 "tagnamep" T.pack <$> some (noneOf (": \t\n" :: [Char])) <* char ':' tagvaluep :: TextParser m Text tagvaluep = do -- ptrace "tagvalue" v <- anyChar `manyTill` (void (try (char ',')) <|> eolof) return $ T.pack $ strip $ reverse $ dropWhile (==',') $ reverse $ strip v --- ** posting dates -- | Parse all posting dates found in a string. Posting dates can be -- expressed with date/date2 tags and/or bracketed dates. The dates -- are parsed fully to give useful errors. Missing years can be -- inferred only if a default date is provided. -- postingdatesp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName,Day)] postingdatesp mdefdate = do -- pdbg 0 $ "postingdatesp" let p = ((:[]) <$> datetagp mdefdate) <|> bracketeddatetagsp mdefdate nonp = many (notFollowedBy p >> anyChar) -- anyChar `manyTill` (lookAhead (try (p >> return ()) <|> eof)) concat <$> many (try (nonp >> p)) --- ** date tags -- | Date tags are tags with name "date" or "date2". Their value is -- parsed as a date, using the provided default date if any for -- inferring a missing year if needed. Any error in date parsing is -- reported and terminates parsing. -- -- >>> rejp (datetagp Nothing) "date: 2000/1/2 " -- Right ("date",2000-01-02) -- -- >>> rejp (datetagp (Just $ fromGregorian 2001 2 3)) "date2:3/4" -- Right ("date2",2001-03-04) -- -- >>> rejp (datetagp Nothing) "date: 3/4" -- Left ...1:9...partial date 3/4 found, but the current year is unknown... -- datetagp :: Monad m => Maybe Day -> ErroringJournalParser m (TagName,Day) datetagp mdefdate = do -- pdbg 0 "datetagp" string "date" n <- T.pack . fromMaybe "" <$> optional (string "2") char ':' startpos <- getPosition v <- lift tagvaluep -- re-parse value as a date. j <- get let ep :: Either (ParseError Char Dec) Day ep = parseWithState' j{jparsedefaultyear=first3.toGregorian <$> mdefdate} -- The value extends to a comma, newline, or end of file. -- It seems like ignoring any extra stuff following a date -- gives better errors here. (do setPosition startpos datep) -- <* eof) v case ep of Left e -> throwError $ parseErrorPretty e Right d -> return ("date"<>n, d) --- ** bracketed dates -- tagorbracketeddatetagsp :: Monad m => Maybe Day -> TextParser u m [Tag] -- tagorbracketeddatetagsp mdefdate = -- bracketeddatetagsp mdefdate <|> ((:[]) <$> tagp) -- | Parse Ledger-style bracketed posting dates ([DATE=DATE2]), as -- "date" and/or "date2" tags. Anything that looks like an attempt at -- this (a square-bracketed sequence of 0123456789/-.= containing at -- least one digit and one date separator) is also parsed, and will -- throw an appropriate error. -- -- The dates are parsed in full here so that errors are reported in -- the right position. A missing year in DATE can be inferred if a -- default date is provided. A missing year in DATE2 will be inferred -- from DATE. -- -- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/2=3/4]" -- Right [("date",2016-01-02),("date2",2016-03-04)] -- -- >>> rejp (bracketeddatetagsp Nothing) "[1]" -- Left ...not a bracketed date... -- -- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/32]" -- Left ...1:11:...bad date: 2016/1/32... -- -- >>> rejp (bracketeddatetagsp Nothing) "[1/31]" -- Left ...1:6:...partial date 1/31 found, but the current year is unknown... 
-- -- >>> rejp (bracketeddatetagsp Nothing) "[0123456789/-.=/-.=]" -- Left ...1:15:...bad date, different separators... -- bracketeddatetagsp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName, Day)] bracketeddatetagsp mdefdate = do -- pdbg 0 "bracketeddatetagsp" char '[' startpos <- getPosition let digits = "0123456789" s <- some (oneOf $ '=':digits++datesepchars) char ']' unless (any (`elem` s) digits && any (`elem` datesepchars) s) $ fail "not a bracketed date" -- looks sufficiently like a bracketed date, now we -- re-parse as dates and throw any errors j <- get let ep :: Either (ParseError Char Dec) (Maybe Day, Maybe Day) ep = parseWithState' j{jparsedefaultyear=first3.toGregorian <$> mdefdate} (do setPosition startpos md1 <- optional datep maybe (return ()) (setYear.first3.toGregorian) md1 md2 <- optional $ char '=' >> datep eof return (md1,md2) ) (T.pack s) case ep of Left e -> throwError $ parseErrorPretty e Right (md1,md2) -> return $ catMaybes [("date",) <$> md1, ("date2",) <$> md2] hledger-lib-1.2/Hledger/Read/CsvReader.hs0000644000000000000000000007756313066173044016405 0ustar0000000000000000{-# LANGUAGE CPP #-} {-| A reader for CSV data, using an extra rules file to help interpret the data. -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TypeSynonymInstances #-} {-# LANGUAGE FlexibleInstances #-} module Hledger.Read.CsvReader ( -- * Reader reader, -- * Misc. CsvRecord, -- rules, rulesFileFor, parseRulesFile, parseAndValidateCsvRules, expandIncludes, transactionFromCsvRecord, -- * Tests tests_Hledger_Read_CsvReader ) where import Prelude () import Prelude.Compat hiding (getContents) import Control.Exception hiding (try) import Control.Monad import Control.Monad.Except import Control.Monad.State.Strict (StateT, get, modify', evalStateT) -- import Test.HUnit import Data.Char (toLower, isDigit, isSpace) import Data.List.Compat import Data.Maybe import Data.Ord import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar (Day) #if MIN_VERSION_time(1,5,0) import Data.Time.Format (parseTimeM, defaultTimeLocale) #else import Data.Time.Format (parseTime) import System.Locale (defaultTimeLocale) #endif import Safe import System.Directory (doesFileExist) import System.FilePath import System.IO (stderr) import Test.HUnit hiding (State) import Text.CSV (parseCSV, CSV) import Text.Megaparsec hiding (parse, State) import Text.Megaparsec.Text import qualified Text.Parsec as Parsec import Text.Printf (hPrintf,printf) import Hledger.Data import Hledger.Utils.UTF8IOCompat (getContents) import Hledger.Utils import Hledger.Read.Common (amountp, statusp, genericSourcePos) reader :: Reader reader = Reader {rFormat = "csv" ,rExtensions = ["csv"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from CSV data, or give an error. -- XXX currently ignores the string and reads from the file path parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse rulesfile _ f t = do r <- liftIO $ readJournalFromCsv rulesfile f t case r of Left e -> throwError e Right j -> return $ journalNumberAndTieTransactions j -- XXX does not use parseAndFinaliseJournal like the other readers -- | Read a Journal from the given CSV data (and filename, used for error -- messages), or return an error. Proceed as follows: -- @ -- 1. 
parse CSV conversion rules from the specified rules file, or from -- the default rules file for the specified CSV file, if it exists, -- or throw a parse error; if it doesn't exist, use built-in default rules -- 2. parse the CSV data, or throw a parse error -- 3. convert the CSV records to transactions using the rules -- 4. if the rules file didn't exist, create it with the default rules and filename -- 5. return the transactions as a Journal -- @ readJournalFromCsv :: Maybe FilePath -> FilePath -> Text -> IO (Either String Journal) readJournalFromCsv Nothing "-" _ = return $ Left "please use --rules-file when reading CSV from stdin" readJournalFromCsv mrulesfile csvfile csvdata = handle (\e -> return $ Left $ show (e :: IOException)) $ do let throwerr = throw.userError -- parse rules let rulesfile = fromMaybe (rulesFileFor csvfile) mrulesfile rulesfileexists <- doesFileExist rulesfile rulestext <- if rulesfileexists then do hPrintf stderr "using conversion rules file %s\n" rulesfile liftIO $ (readFile' rulesfile >>= expandIncludes (takeDirectory rulesfile)) else return $ defaultRulesText rulesfile rules <- liftIO (runExceptT $ parseAndValidateCsvRules rulesfile rulestext) >>= either throwerr return dbg2IO "rules" rules -- apply skip directive let skip = maybe 0 oneorerror $ getDirective "skip" rules where oneorerror "" = 1 oneorerror s = readDef (throwerr $ "could not parse skip value: " ++ show s) s -- parse csv -- parsec seems to fail if you pass it "-" here XXX try again with megaparsec let parsecfilename = if csvfile == "-" then "(stdin)" else csvfile records <- (either throwerr id . dbg2 "validateCsv" . validateCsv skip . dbg2 "parseCsv") `fmap` parseCsv parsecfilename (T.unpack csvdata) dbg1IO "first 3 csv records" $ take 3 records -- identify header lines -- let (headerlines, datalines) = identifyHeaderLines records -- mfieldnames = lastMay headerlines -- convert to transactions and return as a journal let txns = snd $ mapAccumL (\pos r -> (pos, transactionFromCsvRecord (let SourcePos name line col = pos in SourcePos name (unsafePos $ unPos line + 1) col) rules r)) (initialPos parsecfilename) records -- heuristic: if the records appear to have been in reverse date order, -- reverse them all as well as doing a txn date sort, -- so that same-day txns' original order is preserved txns' | length txns > 1 && tdate (head txns) > tdate (last txns) = reverse txns | otherwise = txns when (not rulesfileexists) $ do hPrintf stderr "created default conversion rules file %s, edit this for better results\n" rulesfile writeFile rulesfile $ T.unpack rulestext return $ Right nulljournal{jtxns=sortBy (comparing tdate) txns'} parseCsv :: FilePath -> String -> IO (Either Parsec.ParseError CSV) parseCsv path csvdata = case path of "-" -> liftM (parseCSV "(stdin)") getContents _ -> return $ parseCSV path csvdata -- | Return the cleaned up and validated CSV data, or an error. 
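-- A rough usage sketch for readJournalFromCsv above (assumed; the file
-- names are hypothetical):
--
--   csvtext <- readFile' "bank.csv"
--   ej <- readJournalFromCsv (Just "bank.csv.rules") "bank.csv" csvtext
--   either error' (print . length . jtxns) ej
--
-- Passing Nothing as the rules file makes it look for bank.csv.rules
-- alongside the CSV file (see rulesFileFor below), writing one out from
-- defaultRulesText if it does not exist yet.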
validateCsv :: Int -> Either Parsec.ParseError CSV -> Either String [CsvRecord] validateCsv _ (Left e) = Left $ show e validateCsv numhdrlines (Right rs) = validate $ drop numhdrlines $ filternulls rs where filternulls = filter (/=[""]) validate [] = Left "no CSV records found" validate rs@(first:_) | isJust lessthan2 = let r = fromJust lessthan2 in Left $ printf "CSV record %s has less than two fields" (show r) | isJust different = let r = fromJust different in Left $ printf "the first CSV record %s has %d fields but %s has %d" (show first) length1 (show r) (length r) | otherwise = Right rs where length1 = length first lessthan2 = headMay $ filter ((<2).length) rs different = headMay $ filter ((/=length1).length) rs -- -- | The highest (0-based) field index referenced in the field -- -- definitions, or -1 if no fields are defined. -- maxFieldIndex :: CsvRules -> Int -- maxFieldIndex r = maximumDef (-1) $ catMaybes [ -- dateField r -- ,statusField r -- ,codeField r -- ,amountField r -- ,amountInField r -- ,amountOutField r -- ,currencyField r -- ,accountField r -- ,account2Field r -- ,date2Field r -- ] -- rulesFileFor :: CliOpts -> FilePath -> FilePath -- rulesFileFor CliOpts{rules_file_=Just f} _ = f -- rulesFileFor CliOpts{rules_file_=Nothing} csvfile = replaceExtension csvfile ".rules" rulesFileFor :: FilePath -> FilePath rulesFileFor = (++ ".rules") csvFileFor :: FilePath -> FilePath csvFileFor = reverse . drop 6 . reverse defaultRulesText :: FilePath -> Text defaultRulesText csvfile = T.pack $ unlines ["# hledger csv conversion rules for " ++ csvFileFor (takeFileName csvfile) ,"# cf http://hledger.org/manual#csv-files" ,"" ,"account1 assets:bank:checking" ,"" ,"fields date, description, amount" ,"" ,"#skip 1" ,"" ,"#date-format %-d/%-m/%Y" ,"#date-format %-m/%-d/%Y" ,"#date-format %Y-%h-%d" ,"" ,"#currency $" ,"" ,"if ITUNES" ," account2 expenses:entertainment" ,"" ,"if (TO|FROM) SAVINGS" ," account2 assets:bank:savings\n" ] -------------------------------------------------------------------------------- -- Conversion rules parsing {- Grammar for the CSV conversion rules, more or less: RULES: RULE* RULE: ( FIELD-LIST | FIELD-ASSIGNMENT | CONDITIONAL-BLOCK | SKIP | DATE-FORMAT | COMMENT | BLANK ) NEWLINE FIELD-LIST: fields SPACE FIELD-NAME ( SPACE? , SPACE? FIELD-NAME )* FIELD-NAME: QUOTED-FIELD-NAME | BARE-FIELD-NAME QUOTED-FIELD-NAME: " (any CHAR except double-quote)+ " BARE-FIELD-NAME: any CHAR except space, tab, #, ; FIELD-ASSIGNMENT: JOURNAL-FIELD ASSIGNMENT-SEPARATOR FIELD-VALUE JOURNAL-FIELD: date | date2 | status | code | description | comment | account1 | account2 | amount | JOURNAL-PSEUDO-FIELD JOURNAL-PSEUDO-FIELD: amount-in | amount-out | currency ASSIGNMENT-SEPARATOR: SPACE | ( : SPACE? ) FIELD-VALUE: VALUE (possibly containing CSV-FIELD-REFERENCEs) CSV-FIELD-REFERENCE: % CSV-FIELD CSV-FIELD: ( FIELD-NAME | FIELD-NUMBER ) (corresponding to a CSV field) FIELD-NUMBER: DIGIT+ CONDITIONAL-BLOCK: if ( FIELD-MATCHER NEWLINE )+ INDENTED-BLOCK FIELD-MATCHER: ( CSV-FIELD-NAME SPACE? )? ( MATCHOP SPACE? )? PATTERNS MATCHOP: ~ PATTERNS: ( NEWLINE REGEXP )* REGEXP INDENTED-BLOCK: ( SPACE ( FIELD-ASSIGNMENT | COMMENT ) NEWLINE )+ REGEXP: ( NONSPACE CHAR* ) SPACE? VALUE: SPACE? ( CHAR* ) SPACE? COMMENT: SPACE? COMMENT-CHAR VALUE COMMENT-CHAR: # | ; NONSPACE: any CHAR not a SPACE-CHAR BLANK: SPACE? 
SPACE: SPACE-CHAR+ SPACE-CHAR: space | tab CHAR: any character except newline DIGIT: 0-9 -} {- | A set of data definitions and account-matching patterns sufficient to convert a particular CSV data file into meaningful journal transactions. -} data CsvRules = CsvRules { rdirectives :: [(DirectiveName,String)], rcsvfieldindexes :: [(CsvFieldName, CsvFieldIndex)], rassignments :: [(JournalFieldName, FieldTemplate)], rconditionalblocks :: [ConditionalBlock] } deriving (Show, Eq) type CsvRulesParser a = StateT CsvRules Parser a type DirectiveName = String type CsvFieldName = String type CsvFieldIndex = Int type JournalFieldName = String type FieldTemplate = String type ConditionalBlock = ([RecordMatcher], [(JournalFieldName, FieldTemplate)]) -- block matches if all RecordMatchers match type RecordMatcher = [RegexpPattern] -- match if any regexps match any of the csv fields -- type FieldMatcher = (CsvFieldName, [RegexpPattern]) -- match if any regexps match this csv field type DateFormat = String type RegexpPattern = String rules = CsvRules { rdirectives=[], rcsvfieldindexes=[], rassignments=[], rconditionalblocks=[] } addDirective :: (DirectiveName, String) -> CsvRules -> CsvRules addDirective d r = r{rdirectives=d:rdirectives r} addAssignment :: (JournalFieldName, FieldTemplate) -> CsvRules -> CsvRules addAssignment a r = r{rassignments=a:rassignments r} setIndexesAndAssignmentsFromList :: [CsvFieldName] -> CsvRules -> CsvRules setIndexesAndAssignmentsFromList fs r = addAssignmentsFromList fs . setCsvFieldIndexesFromList fs $ r setCsvFieldIndexesFromList :: [CsvFieldName] -> CsvRules -> CsvRules setCsvFieldIndexesFromList fs r = r{rcsvfieldindexes=zip fs [1..]} addAssignmentsFromList :: [CsvFieldName] -> CsvRules -> CsvRules addAssignmentsFromList fs r = foldl' maybeAddAssignment r journalfieldnames where maybeAddAssignment rules f = (maybe id addAssignmentFromIndex $ elemIndex f fs) rules where addAssignmentFromIndex i = addAssignment (f, "%"++show (i+1)) addConditionalBlock :: ConditionalBlock -> CsvRules -> CsvRules addConditionalBlock b r = r{rconditionalblocks=b:rconditionalblocks r} getDirective :: DirectiveName -> CsvRules -> Maybe FieldTemplate getDirective directivename = lookup directivename . rdirectives instance ShowErrorComponent String where showErrorComponent = id -- | An error-throwing action that parses this file's content -- as CSV conversion rules, interpolating any included files first, -- and runs some extra validation checks. parseRulesFile :: FilePath -> ExceptT String IO CsvRules parseRulesFile f = liftIO (readFile' f >>= expandIncludes (takeDirectory f)) >>= parseAndValidateCsvRules f -- | Look for hledger rules file-style include directives in this text, -- and interpolate the included files, recursively. -- Included file paths may be relative to the directory of the -- provided file path. -- This is a cheap hack to avoid rewriting the CSV rules parser. expandIncludes :: FilePath -> T.Text -> IO T.Text expandIncludes basedir content = do let (ls,rest) = break (T.isPrefixOf "include") $ T.lines content case rest of [] -> return $ T.unlines ls ((T.stripPrefix "include" -> Just f):ls') -> do let f' = basedir dropWhile isSpace (T.unpack f) basedir' = takeDirectory f' included <- readFile' f' >>= expandIncludes basedir' return $ T.unlines [T.unlines ls, included, T.unlines ls'] ls' -> return $ T.unlines $ ls ++ ls' -- should never get here -- | An error-throwing action that parses this text as CSV conversion rules -- and runs some extra validation checks. 
The file path is for error messages. parseAndValidateCsvRules :: FilePath -> T.Text -> ExceptT String IO CsvRules parseAndValidateCsvRules rulesfile s = do let rules = parseCsvRules rulesfile s case rules of Left e -> ExceptT $ return $ Left $ parseErrorPretty e Right r -> do r_ <- liftIO $ runExceptT $ validateRules r ExceptT $ case r_ of Left e -> return $ Left $ parseErrorPretty $ toParseError e Right r -> return $ Right r where toParseError :: forall s. Ord s => s -> ParseError Char s toParseError s = (mempty :: ParseError Char s) { errorCustom = S.singleton s} -- | Parse this text as CSV conversion rules. The file path is for error messages. parseCsvRules :: FilePath -> T.Text -> Either (ParseError Char Dec) CsvRules -- parseCsvRules rulesfile s = runParser csvrulesfile nullrules{baseAccount=takeBaseName rulesfile} rulesfile s parseCsvRules rulesfile s = runParser (evalStateT rulesp rules) rulesfile s -- | Return the validated rules, or an error. validateRules :: CsvRules -> ExceptT String IO CsvRules validateRules rules = do unless (isAssigned "date") $ ExceptT $ return $ Left "Please specify (at top level) the date field. Eg: date %1\n" unless ((amount && not (amountin || amountout)) || (not amount && (amountin && amountout))) $ ExceptT $ return $ Left "Please specify (at top level) either the amount field, or both the amount-in and amount-out fields. Eg: amount %2\n" ExceptT $ return $ Right rules where amount = isAssigned "amount" amountin = isAssigned "amount-in" amountout = isAssigned "amount-out" isAssigned f = isJust $ getEffectiveAssignment rules [] f -- parsers rulesp :: CsvRulesParser CsvRules rulesp = do many $ choiceInState [blankorcommentlinep "blank or comment line" ,(directivep >>= modify' . addDirective) "directive" ,(fieldnamelistp >>= modify' . setIndexesAndAssignmentsFromList) "field name list" ,(fieldassignmentp >>= modify' . addAssignment) "field assignment" ,(conditionalblockp >>= modify' . 
addConditionalBlock) "conditional block" ] eof r <- get return r{rdirectives=reverse $ rdirectives r ,rassignments=reverse $ rassignments r ,rconditionalblocks=reverse $ rconditionalblocks r } blankorcommentlinep :: CsvRulesParser () blankorcommentlinep = lift (pdbg 3 "trying blankorcommentlinep") >> choiceInState [blanklinep, commentlinep] blanklinep :: CsvRulesParser () blanklinep = lift (many spacenonewline) >> newline >> return () "blank line" commentlinep :: CsvRulesParser () commentlinep = lift (many spacenonewline) >> commentcharp >> lift restofline >> return () "comment line" commentcharp :: CsvRulesParser Char commentcharp = oneOf (";#*" :: [Char]) directivep :: CsvRulesParser (DirectiveName, String) directivep = (do lift $ pdbg 3 "trying directive" d <- choiceInState $ map string directives v <- (((char ':' >> lift (many spacenonewline)) <|> lift (some spacenonewline)) >> directivevalp) <|> (optional (char ':') >> lift (many spacenonewline) >> lift eolof >> return "") return (d,v) ) "directive" directives = ["date-format" -- ,"default-account1" -- ,"default-currency" -- ,"skip-lines" -- old ,"skip" -- ,"base-account" -- ,"base-currency" ] directivevalp :: CsvRulesParser String directivevalp = anyChar `manyTill` lift eolof fieldnamelistp :: CsvRulesParser [CsvFieldName] fieldnamelistp = (do lift $ pdbg 3 "trying fieldnamelist" string "fields" optional $ char ':' lift (some spacenonewline) let separator = lift (many spacenonewline) >> char ',' >> lift (many spacenonewline) f <- fromMaybe "" <$> optional fieldnamep fs <- some $ (separator >> fromMaybe "" <$> optional fieldnamep) lift restofline return $ map (map toLower) $ f:fs ) "field name list" fieldnamep :: CsvRulesParser String fieldnamep = quotedfieldnamep <|> barefieldnamep quotedfieldnamep :: CsvRulesParser String quotedfieldnamep = do char '"' f <- some $ noneOf ("\"\n:;#~" :: [Char]) char '"' return f barefieldnamep :: CsvRulesParser String barefieldnamep = some $ noneOf (" \t\n,;#~" :: [Char]) fieldassignmentp :: CsvRulesParser (JournalFieldName, FieldTemplate) fieldassignmentp = do lift $ pdbg 3 "trying fieldassignmentp" f <- journalfieldnamep assignmentseparatorp v <- fieldvalp return (f,v) "field assignment" journalfieldnamep :: CsvRulesParser String journalfieldnamep = lift (pdbg 2 "trying journalfieldnamep") >> choiceInState (map string journalfieldnames) journalfieldnames = [-- pseudo fields: "amount-in" ,"amount-out" ,"currency" -- standard fields: ,"date2" ,"date" ,"status" ,"code" ,"description" ,"amount" ,"account1" ,"account2" ,"comment" ] assignmentseparatorp :: CsvRulesParser () assignmentseparatorp = do lift $ pdbg 3 "trying assignmentseparatorp" choice [ -- try (lift (many spacenonewline) >> oneOf ":="), try (lift (many spacenonewline) >> char ':'), spaceChar ] _ <- lift (many spacenonewline) return () fieldvalp :: CsvRulesParser String fieldvalp = do lift $ pdbg 2 "trying fieldvalp" anyChar `manyTill` lift eolof conditionalblockp :: CsvRulesParser ConditionalBlock conditionalblockp = do lift $ pdbg 3 "trying conditionalblockp" string "if" >> lift (many spacenonewline) >> optional newline ms <- some recordmatcherp as <- many (lift (some spacenonewline) >> fieldassignmentp) when (null as) $ fail "start of conditional block found, but no assignment rules afterward\n(assignment rules in a conditional block should be indented)\n" return (ms, as) "conditional block" recordmatcherp :: CsvRulesParser [String] recordmatcherp = do lift $ pdbg 2 "trying recordmatcherp" -- pos <- currentPos _ <- optional 
(matchoperatorp >> lift (many spacenonewline) >> optional newline) ps <- patternsp when (null ps) $ fail "start of record matcher found, but no patterns afterward\n(patterns should not be indented)\n" return ps "record matcher" matchoperatorp :: CsvRulesParser String matchoperatorp = choiceInState $ map string ["~" -- ,"!~" -- ,"=" -- ,"!=" ] patternsp :: CsvRulesParser [String] patternsp = do lift $ pdbg 3 "trying patternsp" ps <- many regexp return ps regexp :: CsvRulesParser String regexp = do lift $ pdbg 3 "trying regexp" notFollowedBy matchoperatorp c <- lift nonspace cs <- anyChar `manyTill` lift eolof return $ strip $ c:cs -- fieldmatcher = do -- pdbg 2 "trying fieldmatcher" -- f <- fromMaybe "all" `fmap` (optional $ do -- f' <- fieldname -- lift (many spacenonewline) -- return f') -- char '~' -- lift (many spacenonewline) -- ps <- patterns -- let r = "(" ++ intercalate "|" ps ++ ")" -- return (f,r) -- "field matcher" -------------------------------------------------------------------------------- -- Converting CSV records to journal transactions type CsvRecord = [String] -- Convert a CSV record to a transaction using the rules, or raise an -- error if the data can not be parsed. transactionFromCsvRecord :: SourcePos -> CsvRules -> CsvRecord -> Transaction transactionFromCsvRecord sourcepos rules record = t where mdirective = (`getDirective` rules) mfieldtemplate = getEffectiveAssignment rules record render = renderTemplate rules record mskip = mdirective "skip" mdefaultcurrency = mdirective "default-currency" mparsedate = parseDateWithFormatOrDefaultFormats (mdirective "date-format") -- render each field using its template and the csv record, and -- in some cases parse the rendered string (eg dates and amounts) mdateformat = mdirective "date-format" date = render $ fromMaybe "" $ mfieldtemplate "date" date' = fromMaybe (error' $ dateerror "date" date mdateformat) $ mparsedate date mdate2 = maybe Nothing (Just . render) $ mfieldtemplate "date2" mdate2' = maybe Nothing (maybe (error' $ dateerror "date2" (fromMaybe "" mdate2) mdateformat) Just . mparsedate) mdate2 dateerror datefield value mdateformat = unlines ["error: could not parse \""++value++"\" as a date using date format "++maybe "\"YYYY/M/D\", \"YYYY-M-D\" or \"YYYY.M.D\"" show mdateformat ,"the CSV record is: "++intercalate ", " (map show record) ,"the "++datefield++" rule is: "++(fromMaybe "required, but missing" $ mfieldtemplate datefield) ,"the date-format is: "++fromMaybe "unspecified" mdateformat ,"you may need to " ++"change your "++datefield++" rule, " ++maybe "add a" (const "change your") mdateformat++" date-format rule, " ++"or "++maybe "add a" (const "change your") mskip++" skip rule" ,"for m/d/y or d/m/y dates, use date-format %-m/%-d/%Y or date-format %-d/%-m/%Y" ] status = case mfieldtemplate "status" of Nothing -> Uncleared Just str -> either statuserror id . runParser (statusp <* eof) "" . T.pack $ render str where statuserror err = error' $ unlines ["error: could not parse \""++str++"\" as a cleared status (should be *, ! or empty)" ,"the parse error is: "++show err ] code = maybe "" render $ mfieldtemplate "code" description = maybe "" render $ mfieldtemplate "description" comment = maybe "" render $ mfieldtemplate "comment" precomment = maybe "" render $ mfieldtemplate "precomment" currency = maybe (fromMaybe "" mdefaultcurrency) render $ mfieldtemplate "currency" amountstr = (currency++) $ negateIfParenthesised $ getAmountStr rules record amount = either amounterror (Mixed . 
(:[])) $ runParser (evalStateT (amountp <* eof) mempty) "" $ T.pack amountstr amounterror err = error' $ unlines ["error: could not parse \""++amountstr++"\" as an amount" ,showRecord record ,"the amount rule is: "++(fromMaybe "" $ mfieldtemplate "amount") ,"the currency rule is: "++(fromMaybe "unspecified" $ mfieldtemplate "currency") ,"the default-currency is: "++fromMaybe "unspecified" mdefaultcurrency ,"the parse error is: "++show err ,"you may need to " ++"change your amount or currency rules, " ++"or "++maybe "add a" (const "change your") mskip++" skip rule" ] -- Using costOfMixedAmount here to allow complex costs like "10 GBP @@ 15 USD". -- Aim is to have "10 GBP @@ 15 USD" applied to account2, but have "-15USD" applied to account1 amount1 = costOfMixedAmount amount amount2 = (-amount) s `or` def = if null s then def else s defaccount1 = fromMaybe "unknown" $ mdirective "default-account1" defaccount2 = case isNegativeMixedAmount amount2 of Just True -> "income:unknown" _ -> "expenses:unknown" account1 = T.pack $ maybe "" render (mfieldtemplate "account1") `or` defaccount1 account2 = T.pack $ maybe "" render (mfieldtemplate "account2") `or` defaccount2 -- build the transaction t = nulltransaction{ tsourcepos = genericSourcePos sourcepos, tdate = date', tdate2 = mdate2', tstatus = status, tcode = T.pack code, tdescription = T.pack description, tcomment = T.pack comment, tpreceding_comment_lines = T.pack precomment, tpostings = [posting {paccount=account2, pamount=amount2, ptransaction=Just t} ,posting {paccount=account1, pamount=amount1, ptransaction=Just t} ] } getAmountStr :: CsvRules -> CsvRecord -> String getAmountStr rules record = let mamount = getEffectiveAssignment rules record "amount" mamountin = getEffectiveAssignment rules record "amount-in" mamountout = getEffectiveAssignment rules record "amount-out" render = fmap (strip . renderTemplate rules record) in case (render mamount, render mamountin, render mamountout) of (Just "", Nothing, Nothing) -> error' $ "amount has no value\n"++showRecord record (Just a, Nothing, Nothing) -> a (Nothing, Just "", Just "") -> error' $ "neither amount-in or amount-out has a value\n"++showRecord record (Nothing, Just i, Just "") -> i (Nothing, Just "", Just o) -> negateStr o (Nothing, Just _, Just _) -> error' $ "both amount-in and amount-out have a value\n"++showRecord record _ -> error' $ "found values for amount and for amount-in/amount-out - please use either amount or amount-in/amount-out\n"++showRecord record negateIfParenthesised :: String -> String negateIfParenthesised ('(':s) | lastMay s == Just ')' = negateStr $ init s negateIfParenthesised s = s negateStr :: String -> String negateStr ('-':s) = s negateStr s = '-':s -- | Show a (approximate) recreation of the original CSV record. showRecord :: CsvRecord -> String showRecord r = "the CSV record is: "++intercalate ", " (map show r) -- | Given the conversion rules, a CSV record and a journal entry field name, find -- the template value ultimately assigned to this field, either at top -- level or in a matching conditional block. Conditional blocks' -- patterns are matched against an approximation of the original CSV -- record: all the field values with commas intercalated. 
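-- A worked example for getEffectiveAssignment below (assumed, using the
-- default rules shown earlier): given rules containing
--
--   account1 assets:bank:checking
--   if ITUNES
--    account2 expenses:entertainment
--
-- and the CSV record ["2016/3/15","ITUNES STORE","-9.99"], the effective
-- assignment for "account2" is Just "expenses:entertainment", because the
-- block's pattern is matched (case-insensitively) against the comma-joined
-- record "2016/3/15,ITUNES STORE,-9.99". For a record that matches no
-- conditional block it is Nothing, and transactionFromCsvRecord above then
-- falls back to "expenses:unknown" or "income:unknown".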
getEffectiveAssignment :: CsvRules -> CsvRecord -> JournalFieldName -> Maybe FieldTemplate getEffectiveAssignment rules record f = lastMay $ assignmentsFor f where assignmentsFor f = map snd $ filter ((==f).fst) $ toplevelassignments ++ conditionalassignments where toplevelassignments = rassignments rules conditionalassignments = concatMap snd $ filter blockMatches $ blocksAssigning f where blocksAssigning f = filter (any ((==f).fst) . snd) $ rconditionalblocks rules blockMatches :: ConditionalBlock -> Bool blockMatches (matchers,_) = all matcherMatches matchers where matcherMatches :: RecordMatcher -> Bool -- matcherMatches pats = any patternMatches pats matcherMatches pats = patternMatches $ "(" ++ intercalate "|" pats ++ ")" where patternMatches :: RegexpPattern -> Bool patternMatches pat = regexMatchesCI pat csvline where csvline = intercalate "," record renderTemplate :: CsvRules -> CsvRecord -> FieldTemplate -> String renderTemplate rules record t = regexReplaceBy "%[A-z0-9]+" replace t where replace ('%':pat) = maybe pat (\i -> atDef "" record (i-1)) mi where mi | all isDigit pat = readMay pat | otherwise = lookup pat $ rcsvfieldindexes rules replace pat = pat -- Parse the date string using the specified date-format, or if unspecified try these default formats: -- YYYY/MM/DD, YYYY-MM-DD, YYYY.MM.DD, MM/DD/YYYY (month and day can be 1 or 2 digits, year must be 4). parseDateWithFormatOrDefaultFormats :: Maybe DateFormat -> String -> Maybe Day parseDateWithFormatOrDefaultFormats mformat s = firstJust $ map parsewith formats where parsetime = #if MIN_VERSION_time(1,5,0) parseTimeM True #else parseTime #endif parsewith = flip (parsetime defaultTimeLocale) s formats = maybe ["%Y/%-m/%-d" ,"%Y-%-m-%-d" ,"%Y.%-m.%-d" -- ,"%-m/%-d/%Y" -- ,parseTime defaultTimeLocale "%Y/%m/%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTime defaultTimeLocale "%Y-%m-%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTime defaultTimeLocale "%m/%e/%Y" ('0':s) -- ,parseTime defaultTimeLocale "%m-%e-%Y" ('0':s) ] (:[]) mformat -------------------------------------------------------------------------------- -- tests tests_Hledger_Read_CsvReader = TestList (test_parser) -- ++ test_description_parsing) -- test_description_parsing = [ -- "description-field 1" ~: assertParseDescription "description-field 1\n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field 1 " ~: assertParseDescription "description-field 1 \n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field %(1)" ~: assertParseDescription "description-field %(1)\n" [FormatField False Nothing Nothing (FieldNo 1)] -- , "description-field %(1)/$(2)" ~: assertParseDescription "description-field %(1)/%(2)\n" [ -- FormatField False Nothing Nothing (FieldNo 1) -- , FormatLiteral "/" -- , FormatField False Nothing Nothing (FieldNo 2) -- ] -- ] -- where -- assertParseDescription string expected = do assertParseEqual (parseDescription string) (rules {descriptionField = expected}) -- parseDescription :: String -> Either ParseError CsvRules -- parseDescription x = runParser descriptionfieldWrapper rules "(unknown)" x -- descriptionfieldWrapper :: GenParser Char CsvRules CsvRules -- descriptionfieldWrapper = do -- descriptionfield -- r <- getState -- return r test_parser = [ "convert rules parsing: empty file" ~: do -- let assertMixedAmountParse parseresult mixedamount = -- (either (const "parse error") showMixedAmountDebug parseresult) ~?= (showMixedAmountDebug mixedamount) assertParseEqual (parseCsvRules "unknown" "") rules -- ,"convert rules 
parsing: accountrule" ~: do -- assertParseEqual (parseWithState rules accountrule "A\na\n") -- leading blank line required -- ([("A",Nothing)], "a") ,"convert rules parsing: trailing comments" ~: do assertParse (parseWithState' rules rulesp "skip\n# \n#\n") ,"convert rules parsing: trailing blank lines" ~: do assertParse (parseWithState' rules rulesp "skip\n\n \n") ,"convert rules parsing: empty field value" ~: do assertParse (parseWithState' rules rulesp "account1 \nif foo\n account2 foo\n") -- not supported -- ,"convert rules parsing: no final newline" ~: do -- assertParse (parseWithState rules csvrulesfile "A\na") -- assertParse (parseWithState rules csvrulesfile "A\na\n# \n#") -- assertParse (parseWithState rules csvrulesfile "A\na\n\n ") -- (rules{ -- -- dateField=Maybe FieldPosition, -- -- statusField=Maybe FieldPosition, -- -- codeField=Maybe FieldPosition, -- -- descriptionField=Maybe FieldPosition, -- -- amountField=Maybe FieldPosition, -- -- currencyField=Maybe FieldPosition, -- -- baseCurrency=Maybe String, -- -- baseAccount=AccountName, -- accountRules=[ -- ([("A",Nothing)], "a") -- ] -- }) ] hledger-lib-1.2/Hledger/Read/JournalReader.hs0000644000000000000000000006113613066173044017251 0ustar0000000000000000--- * doc -- Lines beginning "--- *" are collapsible orgstruct nodes. Emacs users, -- (add-hook 'haskell-mode-hook -- (lambda () (set-variable 'orgstruct-heading-prefix-regexp "--- " t)) -- 'orgstruct-mode) -- and press TAB on nodes to expand/collapse. {-| A reader for hledger's journal file format (). hledger's journal format is a compatible subset of c++ ledger's (), so this reader should handle many ledger files as well. Example: @ 2012\/3\/24 gift expenses:gifts $10 assets:cash @ Journal format supports the include directive which can read files in other formats, so the other file format readers need to be importable here. Some low-level journal syntax parsers which those readers also use are therefore defined separately in Hledger.Read.Common, avoiding import cycles. 
-} --- * module {-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-} module Hledger.Read.JournalReader ( --- * exports -- * Reader reader, -- * Parsing utils genericSourcePos, parseAndFinaliseJournal, runJournalParser, rjp, runErroringJournalParser, rejp, -- * Parsers used elsewhere getParentAccount, journalp, directivep, defaultyeardirectivep, marketpricedirectivep, datetimep, datep, -- codep, -- accountnamep, modifiedaccountnamep, postingp, -- amountp, -- amountp', -- mamountp', -- numberp, statusp, emptyorcommentlinep, followingcommentp, accountaliasp -- * Tests ,tests_Hledger_Read_JournalReader ) where --- * imports import Prelude () import Prelude.Compat hiding (readFile) import qualified Control.Exception as C import Control.Monad import Control.Monad.Except (ExceptT(..), runExceptT, throwError) import Control.Monad.State.Strict import qualified Data.Map.Strict as M import Data.Monoid import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import Safe import Test.HUnit #ifdef TESTS import Test.Framework import Text.Megaparsec.Error #endif import Text.Megaparsec hiding (parse) import Text.Printf import System.FilePath import Hledger.Data import Hledger.Read.Common import Hledger.Read.TimeclockReader (timeclockfilep) import Hledger.Read.TimedotReader (timedotfilep) import Hledger.Utils -- $setup -- >>> :set -XOverloadedStrings --- * reader reader :: Reader reader = Reader {rFormat = "journal" ,rExtensions = ["journal", "j", "hledger", "ledger"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from hledger's journal file -- format, or give an error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal journalp --- * parsers --- ** journal -- | A journal parser. Accumulates and returns a "ParsedJournal", -- which should be finalised/validated before use. -- -- >>> rejp (journalp <* eof) "2015/1/1\n a 0\n" -- Right Journal with 1 transactions, 1 accounts -- journalp :: MonadIO m => ErroringJournalParser m ParsedJournal journalp = do many addJournalItemP eof get -- | A side-effecting parser; parses any kind of journal item -- and updates the parse state accordingly. addJournalItemP :: MonadIO m => ErroringJournalParser m () addJournalItemP = -- all journal line types can be distinguished by the first -- character, can use choice without backtracking choice [ directivep , transactionp >>= modify' . addTransaction , modifiertransactionp >>= modify' . addModifierTransaction , periodictransactionp >>= modify' . addPeriodicTransaction , marketpricedirectivep >>= modify' . addMarketPrice , void emptyorcommentlinep , void multilinecommentp ] "transaction or directive" --- ** directives -- | Parse any journal directive and update the parse state accordingly. -- Cf http://hledger.org/manual.html#directives, -- http://ledger-cli.org/3.0/doc/ledger3.html#Command-Directives directivep :: MonadIO m => ErroringJournalParser m () directivep = (do optional $ char '!' 
choiceInState [ includedirectivep ,aliasdirectivep ,endaliasesdirectivep ,accountdirectivep ,applyaccountdirectivep ,commoditydirectivep ,endapplyaccountdirectivep ,tagdirectivep ,endtagdirectivep ,defaultyeardirectivep ,defaultcommoditydirectivep ,commodityconversiondirectivep ,ignoredpricecommoditydirectivep ] ) "directive" includedirectivep :: MonadIO m => ErroringJournalParser m () includedirectivep = do string "include" lift (some spacenonewline) filename <- lift restofline parentpos <- getPosition parentj <- get let childj = newJournalWithParseStateFrom parentj (ej :: Either String ParsedJournal) <- liftIO $ runExceptT $ do let curdir = takeDirectory (sourceName parentpos) filepath <- expandPath curdir filename `orRethrowIOError` (show parentpos ++ " locating " ++ filename) txt <- readFileAnyLineEnding filepath `orRethrowIOError` (show parentpos ++ " reading " ++ filepath) (ej1::Either (ParseError Char Dec) ParsedJournal) <- runParserT (evalStateT (choiceInState [journalp ,timeclockfilep ,timedotfilep -- can't include a csv file yet, that reader is special ]) childj) filepath txt either (throwError . ((show parentpos ++ " in included file " ++ show filename ++ ":\n") ++) . show) (return . journalAddFile (filepath, txt)) ej1 case ej of Left e -> throwError e Right childj -> modify' (\parentj -> childj <> parentj) -- discard child's parse info, prepend its (reversed) list data, combine other fields newJournalWithParseStateFrom :: Journal -> Journal newJournalWithParseStateFrom j = mempty{ jparsedefaultyear = jparsedefaultyear j ,jparsedefaultcommodity = jparsedefaultcommodity j ,jparseparentaccounts = jparseparentaccounts j ,jparsealiases = jparsealiases j -- ,jparsetransactioncount = jparsetransactioncount j ,jparsetimeclockentries = jparsetimeclockentries j } -- | Lift an IO action into the exception monad, rethrowing any IO -- error with the given message prepended. orRethrowIOError :: IO a -> String -> ExceptT String IO a orRethrowIOError io msg = ExceptT $ (Right <$> io) `C.catch` \(e::C.IOException) -> return $ Left $ printf "%s:\n%s" msg (show e) accountdirectivep :: JournalStateParser m () accountdirectivep = do string "account" lift (some spacenonewline) acct <- lift accountnamep newline many indentedlinep modify' (\j -> j{jaccounts = acct : jaccounts j}) indentedlinep :: JournalStateParser m String indentedlinep = lift (some spacenonewline) >> (rstrip <$> lift restofline) -- | Parse a one-line or multi-line commodity directive. -- -- >>> Right _ <- rejp commoditydirectivep "commodity $1.00" -- >>> Right _ <- rejp commoditydirectivep "commodity $\n format $1.00" -- >>> Right _ <- rejp commoditydirectivep "commodity $\n\n" -- a commodity with no format -- >>> Right _ <- rejp commoditydirectivep "commodity $1.00\n format $1.00" -- both, what happens ? commoditydirectivep :: Monad m => ErroringJournalParser m () commoditydirectivep = try commoditydirectiveonelinep <|> commoditydirectivemultilinep -- | Parse a one-line commodity directive. 
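--
-- The directive's example amount supplies the commodity's display style
-- (symbol placement, digit groups, decimal places). An illustrative journal
-- line, not taken from the test suite:
--
-- @
-- commodity $1,000.00
-- @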
-- -- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00" -- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00 ; blah\n" commoditydirectiveonelinep :: Monad m => JournalStateParser m () commoditydirectiveonelinep = do string "commodity" lift (some spacenonewline) Amount{acommodity,astyle} <- amountp lift (many spacenonewline) _ <- followingcommentp <|> (lift eolof >> return "") let comm = Commodity{csymbol=acommodity, cformat=Just astyle} modify' (\j -> j{jcommodities=M.insert acommodity comm $ jcommodities j}) -- | Parse a multi-line commodity directive, containing 0 or more format subdirectives. -- -- >>> Right _ <- rejp commoditydirectivemultilinep "commodity $ ; blah \n format $1.00 ; blah" commoditydirectivemultilinep :: Monad m => ErroringJournalParser m () commoditydirectivemultilinep = do string "commodity" lift (some spacenonewline) sym <- lift commoditysymbolp _ <- followingcommentp <|> (lift eolof >> return "") mformat <- lastMay <$> many (indented $ formatdirectivep sym) let comm = Commodity{csymbol=sym, cformat=mformat} modify' (\j -> j{jcommodities=M.insert sym comm $ jcommodities j}) where indented = (lift (some spacenonewline) >>) -- | Parse a format (sub)directive, throwing a parse error if its -- symbol does not match the one given. formatdirectivep :: Monad m => CommoditySymbol -> ErroringJournalParser m AmountStyle formatdirectivep expectedsym = do string "format" lift (some spacenonewline) pos <- getPosition Amount{acommodity,astyle} <- amountp _ <- followingcommentp <|> (lift eolof >> return "") if acommodity==expectedsym then return astyle else parserErrorAt pos $ printf "commodity directive symbol \"%s\" and format directive symbol \"%s\" should be the same" expectedsym acommodity applyaccountdirectivep :: JournalStateParser m () applyaccountdirectivep = do string "apply" >> lift (some spacenonewline) >> string "account" lift (some spacenonewline) parent <- lift accountnamep newline pushParentAccount parent endapplyaccountdirectivep :: JournalStateParser m () endapplyaccountdirectivep = do string "end" >> lift (some spacenonewline) >> string "apply" >> lift (some spacenonewline) >> string "account" popParentAccount aliasdirectivep :: JournalStateParser m () aliasdirectivep = do string "alias" lift (some spacenonewline) alias <- lift accountaliasp addAccountAlias alias accountaliasp :: TextParser m AccountAlias accountaliasp = regexaliasp <|> basicaliasp basicaliasp :: TextParser m AccountAlias basicaliasp = do -- pdbg 0 "basicaliasp" old <- rstrip <$> (some $ noneOf ("=" :: [Char])) char '=' many spacenonewline new <- rstrip <$> anyChar `manyTill` eolof -- don't require a final newline, good for cli options return $ BasicAlias (T.pack old) (T.pack new) regexaliasp :: TextParser m AccountAlias regexaliasp = do -- pdbg 0 "regexaliasp" char '/' re <- some $ noneOf ("/\n\r" :: [Char]) -- paranoid: don't try to read past line end char '/' many spacenonewline char '=' many spacenonewline repl <- rstrip <$> anyChar `manyTill` eolof return $ RegexAlias re repl endaliasesdirectivep :: JournalStateParser m () endaliasesdirectivep = do string "end aliases" clearAccountAliases tagdirectivep :: JournalStateParser m () tagdirectivep = do string "tag" "tag directive" lift (some spacenonewline) _ <- lift $ some nonspace lift restofline return () endtagdirectivep :: JournalStateParser m () endtagdirectivep = do (string "end tag" <|> string "pop") "end tag or pop directive" lift restofline return () defaultyeardirectivep :: JournalStateParser m () 
defaultyeardirectivep = do char 'Y' "default year" lift (many spacenonewline) y <- some digitChar let y' = read y failIfInvalidYear y setYear y' defaultcommoditydirectivep :: Monad m => JournalStateParser m () defaultcommoditydirectivep = do char 'D' "default commodity" lift (some spacenonewline) Amount{..} <- amountp lift restofline setDefaultCommodityAndStyle (acommodity, astyle) marketpricedirectivep :: Monad m => JournalStateParser m MarketPrice marketpricedirectivep = do char 'P' "market price" lift (many spacenonewline) date <- try (do {LocalTime d _ <- datetimep; return d}) <|> datep -- a time is ignored lift (some spacenonewline) symbol <- lift commoditysymbolp lift (many spacenonewline) price <- amountp lift restofline return $ MarketPrice date symbol price ignoredpricecommoditydirectivep :: JournalStateParser m () ignoredpricecommoditydirectivep = do char 'N' "ignored-price commodity" lift (some spacenonewline) lift commoditysymbolp lift restofline return () commodityconversiondirectivep :: Monad m => JournalStateParser m () commodityconversiondirectivep = do char 'C' "commodity conversion" lift (some spacenonewline) amountp lift (many spacenonewline) char '=' lift (many spacenonewline) amountp lift restofline return () --- ** transactions modifiertransactionp :: MonadIO m => ErroringJournalParser m ModifierTransaction modifiertransactionp = do char '=' "modifier transaction" lift (many spacenonewline) valueexpr <- T.pack <$> lift restofline postings <- postingsp Nothing return $ ModifierTransaction valueexpr postings periodictransactionp :: MonadIO m => ErroringJournalParser m PeriodicTransaction periodictransactionp = do char '~' "periodic transaction" lift (many spacenonewline) periodexpr <- T.pack <$> lift restofline postings <- postingsp Nothing return $ PeriodicTransaction periodexpr postings -- | Parse a (possibly unbalanced) transaction. transactionp :: MonadIO m => ErroringJournalParser m Transaction transactionp = do -- ptrace "transactionp" pos <- getPosition date <- datep "transaction" edate <- optional (secondarydatep date) "secondary date" lookAhead (lift spacenonewline <|> newline) "whitespace or newline" status <- lift statusp "cleared status" code <- T.pack <$> lift codep "transaction code" description <- T.pack . 
strip <$> descriptionp comment <- try followingcommentp <|> (newline >> return "") let tags = commentTags comment postings <- postingsp (Just date) pos' <- getPosition let sourcepos = journalSourcePos pos pos' return $ txnTieKnot $ Transaction 0 sourcepos date edate status code description comment tags postings "" #ifdef TESTS test_transactionp = do let s `gives` t = do let p = parseWithState mempty transactionp s assertBool $ isRight p let Right t2 = p -- same f = assertEqual (f t) (f t2) assertEqual (tdate t) (tdate t2) assertEqual (tdate2 t) (tdate2 t2) assertEqual (tstatus t) (tstatus t2) assertEqual (tcode t) (tcode t2) assertEqual (tdescription t) (tdescription t2) assertEqual (tcomment t) (tcomment t2) assertEqual (ttags t) (ttags t2) assertEqual (tpreceding_comment_lines t) (tpreceding_comment_lines t2) assertEqual (show $ tpostings t) (show $ tpostings t2) -- "0000/01/01\n\n" `gives` nulltransaction unlines [ "2012/05/14=2012/05/15 (code) desc ; tcomment1", " ; tcomment2", " ; ttag1: val1", " * a $1.00 ; pcomment1", " ; pcomment2", " ; ptag1: val1", " ; ptag2: val2" ] `gives` nulltransaction{ tdate=parsedate "2012/05/14", tdate2=Just $ parsedate "2012/05/15", tstatus=Uncleared, tcode="code", tdescription="desc", tcomment=" tcomment1\n tcomment2\n ttag1: val1\n", ttags=[("ttag1","val1")], tpostings=[ nullposting{ pstatus=Cleared, paccount="a", pamount=Mixed [usd 1], pcomment=" pcomment1\n pcomment2\n ptag1: val1\n ptag2: val2\n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")], ptransaction=Nothing } ], tpreceding_comment_lines="" } unlines [ "2015/1/1", ] `gives` nulltransaction{ tdate=parsedate "2015/01/01", } assertRight $ parseWithState mempty transactionp $ unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.18" ,"" ] -- transactionp should not parse just a date assertLeft $ parseWithState mempty transactionp "2009/1/1\n" -- transactionp should not parse just a date and description assertLeft $ parseWithState mempty transactionp "2009/1/1 a\n" -- transactionp should not parse a following comment as part of the description let p = parseWithState mempty transactionp "2009/1/1 a ;comment\n b 1\n" assertRight p assertEqual "a" (let Right p' = p in tdescription p') -- parse transaction with following whitespace line assertRight $ parseWithState mempty transactionp $ unlines ["2012/1/1" ," a 1" ," b" ," " ] let p = parseWithState mempty transactionp $ unlines ["2009/1/1 x ; transaction comment" ," a 1 ; posting 1 comment" ," ; posting 1 comment 2" ," b" ," ; posting 2 comment" ] assertRight p assertEqual 2 (let Right t = p in length $ tpostings t) #endif --- ** postings -- Parse the following whitespace-beginning lines as postings, posting -- tags, and/or comments (inferring year, if needed, from the given date). 
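--
-- For example, the indented lines that can follow a transaction's first
-- line look like this (an illustrative sketch, not real test data):
--
-- @
--     expenses:food:dining   $10.00  ; ptag1: val1
--     assets:cash
-- @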
postingsp :: MonadIO m => Maybe Day -> ErroringJournalParser m [Posting] postingsp mdate = many (try $ postingp mdate) "postings" -- linebeginningwithspaces :: Monad m => JournalParser m String -- linebeginningwithspaces = do -- sp <- lift (some spacenonewline) -- c <- nonspace -- cs <- lift restofline -- return $ sp ++ (c:cs) ++ "\n" postingp :: MonadIO m => Maybe Day -> ErroringJournalParser m Posting postingp mtdate = do -- pdbg 0 "postingp" lift (some spacenonewline) status <- lift statusp lift (many spacenonewline) account <- modifiedaccountnamep let (ptype, account') = (accountNamePostingType account, textUnbracket account) amount <- spaceandamountormissingp massertion <- partialbalanceassertionp _ <- fixedlotpricep lift (many spacenonewline) (comment,tags,mdate,mdate2) <- try (followingcommentandtagsp mtdate) <|> (newline >> return ("",[],Nothing,Nothing)) return posting { pdate=mdate , pdate2=mdate2 , pstatus=status , paccount=account' , pamount=amount , pcomment=comment , ptype=ptype , ptags=tags , pbalanceassertion=massertion } #ifdef TESTS test_postingp = do let s `gives` ep = do let parse = parseWithState mempty (postingp Nothing) s assertBool -- "postingp parser" $ isRight parse let Right ap = parse same f = assertEqual (f ep) (f ap) same pdate same pstatus same paccount same pamount same pcomment same ptype same ptags same ptransaction " expenses:food:dining $10.00 ; a: a a \n ; b: b b \n" `gives` posting{paccount="expenses:food:dining", pamount=Mixed [usd 10], pcomment=" a: a a \n b: b b \n", ptags=[("a","a a"), ("b","b b")]} " a 1 ; [2012/11/28]\n" `gives` ("a" `post` num 1){pcomment=" [2012/11/28]\n" ,ptags=[("date","2012/11/28")] ,pdate=parsedateM "2012/11/28"} " a 1 ; a:a, [=2012/11/28]\n" `gives` ("a" `post` num 1){pcomment=" a:a, [=2012/11/28]\n" ,ptags=[("a","a"), ("date2","2012/11/28")] ,pdate=Nothing} " a 1 ; a:a\n ; [2012/11/28=2012/11/29],b:b\n" `gives` ("a" `post` num 1){pcomment=" a:a\n [2012/11/28=2012/11/29],b:b\n" ,ptags=[("a","a"), ("date","2012/11/28"), ("date2","2012/11/29"), ("b","b")] ,pdate=parsedateM "2012/11/28"} assertBool -- "postingp parses a quoted commodity with numbers" (isRight $ parseWithState mempty (postingp Nothing) " a 1 \"DE123\"\n") -- ,"postingp parses balance assertions and fixed lot prices" ~: do assertBool (isRight $ parseWithState mempty (postingp Nothing) " a 1 \"DE123\" =$1 { =2.2 EUR} \n") -- let parse = parseWithState mempty postingp " a\n ;next-line comment\n" -- assertRight parse -- let Right p = parse -- assertEqual "next-line comment\n" (pcomment p) -- assertEqual (Just nullmixedamt) (pbalanceassertion p) #endif --- * more tests tests_Hledger_Read_JournalReader = TestList $ concat [ -- test_numberp [ "showParsedMarketPrice" ~: do let mp = parseWithState mempty marketpricedirectivep "P 2017/01/30 BTC $922.83\n" mpString = (fmap . 
fmap) showMarketPrice mp mpString `is` (Just (Right "P 2017/01/30 BTC $922.83")) ] ] {- old hunit tests tests_Hledger_Read_JournalReader = TestList $ concat [ test_numberp, test_amountp, test_spaceandamountormissingp, test_tagcomment, test_inlinecomment, test_comments, test_ledgerDateSyntaxToTags, test_postingp, test_transactionp, [ "modifiertransactionp" ~: do assertParse (parseWithState mempty modifiertransactionp "= (some value expr)\n some:postings 1\n") ,"periodictransactionp" ~: do assertParse (parseWithState mempty periodictransactionp "~ (some period expr)\n some:postings 1\n") ,"directivep" ~: do assertParse (parseWithState mempty directivep "!include /some/file.x\n") assertParse (parseWithState mempty directivep "account some:account\n") assertParse (parseWithState mempty (directivep >> directivep) "!account a\nend\n") ,"comment" ~: do assertParse (parseWithState mempty comment "; some comment \n") assertParse (parseWithState mempty comment " \t; x\n") assertParse (parseWithState mempty comment "#x") ,"datep" ~: do assertParse (parseWithState mempty datep "2011/1/1") assertParseFailure (parseWithState mempty datep "1/1") assertParse (parseWithState mempty{jpsYear=Just 2011} datep "1/1") ,"datetimep" ~: do let p = do {t <- datetimep; eof; return t} bad = assertParseFailure . parseWithState mempty p good = assertParse . parseWithState mempty p bad "2011/1/1" bad "2011/1/1 24:00:00" bad "2011/1/1 00:60:00" bad "2011/1/1 00:00:60" good "2011/1/1 00:00" good "2011/1/1 23:59:59" good "2011/1/1 3:5:7" -- timezone is parsed but ignored let startofday = LocalTime (fromGregorian 2011 1 1) (TimeOfDay 0 0 (fromIntegral 0)) assertParseEqual (parseWithState mempty p "2011/1/1 00:00-0800") startofday assertParseEqual (parseWithState mempty p "2011/1/1 00:00+1234") startofday ,"defaultyeardirectivep" ~: do assertParse (parseWithState mempty defaultyeardirectivep "Y 2010\n") assertParse (parseWithState mempty defaultyeardirectivep "Y 10001\n") ,"marketpricedirectivep" ~: assertParseEqual (parseWithState mempty marketpricedirectivep "P 2004/05/01 XYZ $55.00\n") (MarketPrice (parsedate "2004/05/01") "XYZ" $ usd 55) ,"ignoredpricecommoditydirectivep" ~: do assertParse (parseWithState mempty ignoredpricecommoditydirectivep "N $\n") ,"defaultcommoditydirectivep" ~: do assertParse (parseWithState mempty defaultcommoditydirectivep "D $1,000.0\n") ,"commodityconversiondirectivep" ~: do assertParse (parseWithState mempty commodityconversiondirectivep "C 1h = $50.00\n") ,"tagdirectivep" ~: do assertParse (parseWithState mempty tagdirectivep "tag foo \n") ,"endtagdirectivep" ~: do assertParse (parseWithState mempty endtagdirectivep "end tag \n") assertParse (parseWithState mempty endtagdirectivep "pop \n") ,"accountnamep" ~: do assertBool "accountnamep parses a normal account name" (isRight $ parsewith accountnamep "a:b:c") assertBool "accountnamep rejects an empty inner component" (isLeft $ parsewith accountnamep "a::c") assertBool "accountnamep rejects an empty leading component" (isLeft $ parsewith accountnamep ":b:c") assertBool "accountnamep rejects an empty trailing component" (isLeft $ parsewith accountnamep "a:b:") ,"leftsymbolamountp" ~: do assertParseEqual (parseWithState mempty leftsymbolamountp "$1") (usd 1 `withPrecision` 0) assertParseEqual (parseWithState mempty leftsymbolamountp "$-1") (usd (-1) `withPrecision` 0) assertParseEqual (parseWithState mempty leftsymbolamountp "-$1") (usd (-1) `withPrecision` 0) ,"amount" ~: do let -- | compare a parse result with an expected amount, showing the 
debug representation for clarity assertAmountParse parseresult amount = (either (const "parse error") showAmountDebug parseresult) ~?= (showAmountDebug amount) assertAmountParse (parseWithState mempty amountp "1 @ $2") (num 1 `withPrecision` 0 `at` (usd 2 `withPrecision` 0)) ]] -} hledger-lib-1.2/Hledger/Read/TimeclockReader.hs0000644000000000000000000001040113035510426017531 0ustar0000000000000000{-| A reader for the timeclock file format generated by timeclock.el (). Example: @ i 2007\/03\/10 12:26:00 hledger o 2007\/03\/10 17:26:02 @ From timeclock.el 2.6: @ A timeclock contains data in the form of a single entry per line. Each entry has the form: CODE YYYY/MM/DD HH:MM:SS [COMMENT] CODE is one of: b, h, i, o or O. COMMENT is optional when the code is i, o or O. The meanings of the codes are: b Set the current time balance, or \"time debt\". Useful when archiving old log data, when a debt must be carried forward. The COMMENT here is the number of seconds of debt. h Set the required working time for the given day. This must be the first entry for that day. The COMMENT in this case is the number of hours in this workday. Floating point amounts are allowed. i Clock in. The COMMENT in this case should be the name of the project worked on. o Clock out. COMMENT is unnecessary, but can be used to provide a description of how the period went, for example. O Final clock out. Whatever project was being worked on, it is now finished. Useful for creating summary reports. @ -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Read.TimeclockReader ( -- * Reader reader, -- * Misc other exports timeclockfilep, -- * Tests tests_Hledger_Read_TimeclockReader ) where import Prelude () import Prelude.Compat import Control.Monad import Control.Monad.Except (ExceptT) import Control.Monad.State.Strict import Data.Maybe (fromMaybe) import Data.Text (Text) import qualified Data.Text as T import Test.HUnit import Text.Megaparsec hiding (parse) import Hledger.Data -- XXX too much reuse ? import Hledger.Read.Common import Hledger.Utils reader :: Reader reader = Reader {rFormat = "timeclock" ,rExtensions = ["timeclock"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from timeclock.el's timeclock -- format, saving the provided file path and the current time, or give an -- error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal timeclockfilep timeclockfilep :: ErroringJournalParser IO ParsedJournal timeclockfilep = do many timeclockitemp eof j@Journal{jparsetimeclockentries=es} <- get -- Convert timeclock entries in this journal to transactions, closing any unfinished sessions. -- Doing this here rather than in journalFinalise means timeclock sessions can't span file boundaries, -- but it simplifies code above. now <- liftIO getCurrentLocalTime -- entries have been parsed in reverse order. 
timeclockEntriesToTransactions -- expects them to be in normal order, then we must reverse again since -- journalFinalise expects them in reverse order let j' = j{jtxns = reverse $ timeclockEntriesToTransactions now $ reverse es, jparsetimeclockentries = []} return j' where -- As all ledger line types can be distinguished by the first -- character, excepting transactions versus empty (blank or -- comment-only) lines, can use choice w/o try timeclockitemp = choice [ void emptyorcommentlinep , timeclockentryp >>= \e -> modify' (\j -> j{jparsetimeclockentries = e : jparsetimeclockentries j}) ] "timeclock entry, or default year or historical price directive" -- | Parse a timeclock entry. timeclockentryp :: JournalStateParser m TimeclockEntry timeclockentryp = do sourcepos <- genericSourcePos <$> lift getPosition code <- oneOf ("bhioO" :: [Char]) lift (some spacenonewline) datetime <- datetimep account <- fromMaybe "" <$> optional (lift (some spacenonewline) >> modifiedaccountnamep) description <- T.pack . fromMaybe "" <$> lift (optional (some spacenonewline >> restofline)) return $ TimeclockEntry sourcepos (read [code]) datetime account description tests_Hledger_Read_TimeclockReader = TestList [ ] hledger-lib-1.2/Hledger/Read/TimedotReader.hs0000644000000000000000000001001313035510426017223 0ustar0000000000000000{-| A reader for the "timedot" file format. Example: @ #DATE #ACCT DOTS # Each dot represents 15m, spaces are ignored # on 2/1, 1h was spent on FOSS haskell work, 0.25h on research, etc. 2/1 fos.haskell .... .. biz.research . inc.client1 .... .... .... .... .... .... 2/2 biz.research . inc.client1 .... .... .. @ -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Read.TimedotReader ( -- * Reader reader, -- * Misc other exports timedotfilep, -- * Tests tests_Hledger_Read_TimedotReader ) where import Prelude () import Prelude.Compat import Control.Monad import Control.Monad.Except (ExceptT) import Control.Monad.State.Strict import Data.Char (isSpace) import Data.List (foldl') import Data.Maybe import Data.Text (Text) import Test.HUnit import Text.Megaparsec hiding (parse) import Hledger.Data import Hledger.Read.Common import Hledger.Utils hiding (ptrace) -- easier to toggle this here sometimes -- import qualified Hledger.Utils (ptrace) -- ptrace = Hledger.Utils.ptrace ptrace :: Monad m => a -> m a ptrace = return reader :: Reader reader = Reader {rFormat = "timedot" ,rExtensions = ["timedot"] ,rParser = parse ,rExperimental = False } -- | Parse and post-process a "Journal" from the timedot format, or give an error. parse :: Maybe FilePath -> Bool -> FilePath -> Text -> ExceptT String IO Journal parse _ = parseAndFinaliseJournal timedotfilep timedotfilep :: JournalStateParser m ParsedJournal timedotfilep = do many timedotfileitemp eof get where timedotfileitemp :: JournalStateParser m () timedotfileitemp = do ptrace "timedotfileitemp" choice [ void emptyorcommentlinep ,timedotdayp >>= \ts -> modify' (addTransactions ts) ] "timedot day entry, or default year or comment line or blank line" addTransactions :: [Transaction] -> Journal -> Journal addTransactions ts j = foldl' (flip ($)) j (map addTransaction ts) -- | Parse timedot day entries to zero or more time transactions for that day. -- @ -- 2/1 -- fos.haskell .... .. -- biz.research . -- inc.client1 .... .... .... .... .... .... 
-- @ timedotdayp :: JournalStateParser m [Transaction] timedotdayp = do ptrace " timedotdayp" d <- datep <* lift eolof es <- catMaybes <$> many (const Nothing <$> try emptyorcommentlinep <|> Just <$> (notFollowedBy datep >> timedotentryp)) return $ map (\t -> t{tdate=d}) es -- <$> many timedotentryp -- | Parse a single timedot entry to one (dateless) transaction. -- @ -- fos.haskell .... .. -- @ timedotentryp :: JournalStateParser m Transaction timedotentryp = do ptrace " timedotentryp" pos <- genericSourcePos <$> getPosition lift (many spacenonewline) a <- modifiedaccountnamep lift (many spacenonewline) hours <- try (followingcommentp >> return 0) <|> (timedotdurationp <* (try followingcommentp <|> (newline >> return ""))) let t = nulltransaction{ tsourcepos = pos, tstatus = Cleared, tpostings = [ nullposting{paccount=a ,pamount=Mixed [setAmountPrecision 2 $ num hours] -- don't assume hours; do set precision to 2 ,ptype=VirtualPosting ,ptransaction=Just t } ] } return t timedotdurationp :: JournalStateParser m Quantity timedotdurationp = try timedotnumberp <|> timedotdotsp -- | Parse a duration written as a decimal number of hours (optionally followed by the letter h). -- @ -- 1.5h -- @ timedotnumberp :: JournalStateParser m Quantity timedotnumberp = do (q, _, _, _) <- lift numberp lift (many spacenonewline) optional $ char 'h' lift (many spacenonewline) return q -- | Parse a quantity written as a line of dots, each representing 0.25. -- @ -- .... .. -- @ timedotdotsp :: JournalStateParser m Quantity timedotdotsp = do dots <- filter (not.isSpace) <$> many (oneOf (". " :: [Char])) return $ (/4) $ fromIntegral $ length dots tests_Hledger_Read_TimedotReader = TestList [ ] hledger-lib-1.2/Hledger/Reports.hs0000644000000000000000000000234313035210046015260 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Generate several common kinds of report from a journal, as \"*Report\" - simple intermediate data structures intended to be easily rendered as text, html, json, csv etc. by hledger commands, hamlet templates, javascript, or whatever. -} module Hledger.Reports ( module Hledger.Reports.ReportOptions, module Hledger.Reports.EntriesReport, module Hledger.Reports.PostingsReport, module Hledger.Reports.TransactionsReports, module Hledger.Reports.BalanceReport, module Hledger.Reports.MultiBalanceReports, -- module Hledger.Reports.BalanceHistoryReport, -- * Tests tests_Hledger_Reports ) where import Test.HUnit import Hledger.Reports.ReportOptions import Hledger.Reports.EntriesReport import Hledger.Reports.PostingsReport import Hledger.Reports.TransactionsReports import Hledger.Reports.BalanceReport import Hledger.Reports.MultiBalanceReports -- import Hledger.Reports.BalanceHistoryReport tests_Hledger_Reports :: Test tests_Hledger_Reports = TestList $ -- ++ tests_isInterestingIndented [ tests_Hledger_Reports_ReportOptions, tests_Hledger_Reports_EntriesReport, tests_Hledger_Reports_PostingsReport, tests_Hledger_Reports_BalanceReport ] hledger-lib-1.2/Hledger/Reports/BalanceHistoryReport.hs0000644000000000000000000000173513035210046021367 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Account balance history report. 
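
It provides 'accountBalanceHistory', which gives an account's running
(subaccount-inclusive) balance at each posting date, eg (illustrative
values only):

@
[ (2008\/01\/01, $1), (2008\/06\/02, $2) ]
@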
-} -- XXX not used module Hledger.Reports.BalanceHistoryReport ( accountBalanceHistory -- -- * Tests -- tests_Hledger_Reports_BalanceReport ) where import Data.Time.Calendar -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions import Hledger.Reports.TransactionsReports -- | Get the historical running inclusive balance of a particular account, -- from earliest to latest posting date. accountBalanceHistory :: ReportOpts -> Journal -> Account -> [(Day, MixedAmount)] accountBalanceHistory ropts j a = [(getdate t, bal) | (t,_,_,_,_,bal) <- items] where (_,items) = journalTransactionsReport ropts j acctquery inclusivebal = True acctquery = Acct $ (if inclusivebal then accountNameToAccountRegex else accountNameToAccountOnlyRegex) $ aname a getdate = if date2_ ropts then transactionDate2 else tdate hledger-lib-1.2/Hledger/Reports/BalanceReport.hs0000644000000000000000000003664313067565120020026 0ustar0000000000000000{-| Balance report, used by the balance command. -} {-# LANGUAGE FlexibleInstances, ScopedTypeVariables, OverloadedStrings #-} module Hledger.Reports.BalanceReport ( BalanceReport, BalanceReportItem, balanceReport, balanceReportValue, mixedAmountValue, amountValue, flatShowsExclusiveBalance, -- * Tests tests_Hledger_Reports_BalanceReport ) where import Data.List import Data.Ord import Data.Maybe import Data.Time.Calendar import Test.HUnit import qualified Data.Text as T import Hledger.Data import Hledger.Read (mamountp') import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions -- | A simple single-column balance report. It has: -- -- 1. a list of items, one per account, each containing: -- -- * the full account name -- -- * the Ledger-style elided short account name -- (the leaf account name, prefixed by any boring parents immediately above); -- or with --flat, the full account name again -- -- * the number of indentation steps for rendering a Ledger-style account tree, -- taking into account elided boring parents, --no-elide and --flat -- -- * an amount -- -- 2. the total of all amounts -- type BalanceReport = ([BalanceReportItem], MixedAmount) type BalanceReportItem = (AccountName, AccountName, Int, MixedAmount) -- | When true (the default), this makes balance --flat reports and their implementation clearer. -- Single/multi-col balance reports currently aren't all correct if this is false. flatShowsExclusiveBalance = True -- | Enabling this makes balance --flat --empty also show parent accounts without postings, -- in addition to those with postings and a zero balance. Disabling it shows only the latter. -- No longer supported, but leave this here for a bit. -- flatShowsPostinglessAccounts = True -- | Generate a simple balance report, containing the matched accounts and -- their balances (change of balance) during the specified period. -- This is like PeriodChangeReport with a single column (but more mature, -- eg this can do hierarchical display). 
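--
-- A minimal usage sketch (not from the test suite; assumes a parsed Journal @j@):
--
-- @
-- let (items, total) = balanceReport defreportopts (queryFromOpts nulldate defreportopts) j
-- @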
balanceReport :: ReportOpts -> Query -> Journal -> BalanceReport balanceReport opts q j = (items, total) where -- dbg1 = const id -- exclude from debug output dbg1 s = let p = "balanceReport" in Hledger.Utils.dbg1 (p++" "++s) -- add prefix in debug output accts = ledgerRootAccount $ ledgerFromJournal q $ journalSelectingAmountFromOpts opts j accts' :: [Account] | queryDepth q == 0 = dbg1 "accts" $ take 1 $ clipAccountsAndAggregate (queryDepth q) $ flattenAccounts accts | flat_ opts = dbg1 "accts" $ filterzeros $ filterempty $ drop 1 $ clipAccountsAndAggregate (queryDepth q) $ flattenAccounts accts | otherwise = dbg1 "accts" $ filter (not.aboring) $ drop 1 $ flattenAccounts $ markboring $ prunezeros $ clipAccounts (queryDepth q) accts where balance = if flat_ opts then aebalance else aibalance filterzeros = if empty_ opts then id else filter (not . isZeroMixedAmount . balance) filterempty = filter (\a -> anumpostings a > 0 || not (isZeroMixedAmount (balance a))) prunezeros = if empty_ opts then id else fromMaybe nullacct . pruneAccounts (isZeroMixedAmount . balance) markboring = if no_elide_ opts then id else markBoringParentAccounts items = dbg1 "items" $ map (balanceReportItem opts q) accts' total | not (flat_ opts) = dbg1 "total" $ sum [amt | (_,_,indent,amt) <- items, indent == 0] | otherwise = dbg1 "total" $ if flatShowsExclusiveBalance then sum $ map fourth4 items else sum $ map aebalance $ clipAccountsAndAggregate 1 accts' -- | In an account tree with zero-balance leaves removed, mark the -- elidable parent accounts (those with one subaccount and no balance -- of their own). markBoringParentAccounts :: Account -> Account markBoringParentAccounts = tieAccountParents . mapAccounts mark where mark a | length (asubs a) == 1 && isZeroMixedAmount (aebalance a) = a{aboring=True} | otherwise = a balanceReportItem :: ReportOpts -> Query -> Account -> BalanceReportItem balanceReportItem opts q a | flat_ opts = (name, name, 0, (if flatShowsExclusiveBalance then aebalance else aibalance) a) | otherwise = (name, elidedname, indent, aibalance a) where name | queryDepth q > 0 = aname a | otherwise = "..." elidedname = accountNameFromComponents (adjacentboringparentnames ++ [accountLeafName name]) adjacentboringparentnames = reverse $ map (accountLeafName.aname) $ takeWhile aboring parents indent = length $ filter (not.aboring) parents -- parents exclude the tree's root node parents = case parentAccounts a of [] -> [] as -> init as -- -- the above using the newer multi balance report code: -- balanceReport' opts q j = (items, total) -- where -- MultiBalanceReport (_,mbrrows,mbrtotals) = PeriodChangeReport opts q j -- items = [(a,a',n, headDef 0 bs) | ((a,a',n), bs) <- mbrrows] -- total = headDef 0 mbrtotals -- | Convert all the amounts in a single-column balance report to -- their value on the given date in their default valuation -- commodities. balanceReportValue :: Journal -> Day -> BalanceReport -> BalanceReport balanceReportValue j d r = r' where (items,total) = r r' = dbg8 "known market prices" (jmarketprices j) `seq` dbg8 "report end date" d `seq` dbg8 "balanceReportValue" ([(n, n', i, mixedAmountValue j d a) |(n,n',i,a) <- items], mixedAmountValue j d total) mixedAmountValue :: Journal -> Day -> MixedAmount -> MixedAmount mixedAmountValue j d (Mixed as) = Mixed $ map (amountValue j d) as -- | Find the market value of this amount on the given date, in it's -- default valuation commodity, based on recorded market prices. 
-- If no default valuation commodity can be found, the amount is left -- unchanged. amountValue :: Journal -> Day -> Amount -> Amount amountValue j d a = case commodityValue j d (acommodity a) of Just v -> v{aquantity=aquantity v * aquantity a ,aprice=aprice a } Nothing -> a -- | Find the market value, if known, of one unit of this commodity (A) on -- the given valuation date, in the commodity (B) mentioned in the latest -- applicable market price. The latest applicable market price is the market -- price directive for commodity A with the latest date that is on or before -- the valuation date; or if there are multiple such prices with the same date, -- the last parsed. commodityValue :: Journal -> Day -> CommoditySymbol -> Maybe Amount commodityValue j valuationdate c | null applicableprices = dbg Nothing | otherwise = dbg $ Just $ mpamount $ last applicableprices where dbg = dbg8 ("using market price for "++T.unpack c) applicableprices = [p | p <- sortBy (comparing mpdate) $ jmarketprices j , mpcommodity p == c , mpdate p <= valuationdate ] tests_balanceReport = let (opts,journal) `gives` r = do let (eitems, etotal) = r (aitems, atotal) = balanceReport opts (queryFromOpts nulldate opts) journal showw (acct,acct',indent,amt) = (acct, acct', indent, showMixedAmountDebug amt) assertEqual "items" (map showw eitems) (map showw aitems) assertEqual "total" (showMixedAmountDebug etotal) (showMixedAmountDebug atotal) usd0 = usd 0 in [ "balanceReport with no args on null journal" ~: do (defreportopts, nulljournal) `gives` ([], Mixed [nullamt]) ,"balanceReport with no args on sample journal" ~: do (defreportopts, samplejournal) `gives` ([ ("assets","assets",0, mamountp' "$-1.00") ,("assets:bank:saving","bank:saving",1, mamountp' "$1.00") ,("assets:cash","cash",1, mamountp' "$-2.00") ,("expenses","expenses",0, mamountp' "$2.00") ,("expenses:food","food",1, mamountp' "$1.00") ,("expenses:supplies","supplies",1, mamountp' "$1.00") ,("income","income",0, mamountp' "$-2.00") ,("income:gifts","gifts",1, mamountp' "$-1.00") ,("income:salary","salary",1, mamountp' "$-1.00") ,("liabilities:debts","liabilities:debts",0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with --depth=N" ~: do (defreportopts{depth_=Just 1}, samplejournal) `gives` ([ ("assets", "assets", 0, mamountp' "$-1.00") ,("expenses", "expenses", 0, mamountp' "$2.00") ,("income", "income", 0, mamountp' "$-2.00") ,("liabilities", "liabilities", 0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with depth:N" ~: do (defreportopts{query_="depth:1"}, samplejournal) `gives` ([ ("assets", "assets", 0, mamountp' "$-1.00") ,("expenses", "expenses", 0, mamountp' "$2.00") ,("income", "income", 0, mamountp' "$-2.00") ,("liabilities", "liabilities", 0, mamountp' "$1.00") ], Mixed [usd0]) ,"balanceReport with a date or secondary date span" ~: do (defreportopts{query_="date:'in 2009'"}, samplejournal2) `gives` ([], Mixed [nullamt]) (defreportopts{query_="date2:'in 2009'"}, samplejournal2) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mamountp' "$1.00") ,("income:salary","income:salary",0,mamountp' "$-1.00") ], Mixed [usd0]) ,"balanceReport with desc:" ~: do (defreportopts{query_="desc:income"}, samplejournal) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mamountp' "$1.00") ,("income:salary","income:salary",0, mamountp' "$-1.00") ], Mixed [usd0]) ,"balanceReport with not:desc:" ~: do (defreportopts{query_="not:desc:income"}, samplejournal) `gives` ([ ("assets","assets",0, mamountp' "$-2.00") ,("assets:bank","bank",1, 
Mixed [usd0]) ,("assets:bank:checking","checking",2,mamountp' "$-1.00") ,("assets:bank:saving","saving",2, mamountp' "$1.00") ,("assets:cash","cash",1, mamountp' "$-2.00") ,("expenses","expenses",0, mamountp' "$2.00") ,("expenses:food","food",1, mamountp' "$1.00") ,("expenses:supplies","supplies",1, mamountp' "$1.00") ,("income:gifts","income:gifts",0, mamountp' "$-1.00") ,("liabilities:debts","liabilities:debts",0, mamountp' "$1.00") ], Mixed [usd0]) {- ,"accounts report with account pattern o" ~: defreportopts{patterns_=["o"]} `gives` [" $1 expenses:food" ," $-2 income" ," $-1 gifts" ," $-1 salary" ,"--------------------" ," $-1" ] ,"accounts report with account pattern o and --depth 1" ~: defreportopts{patterns_=["o"],depth_=Just 1} `gives` [" $1 expenses" ," $-2 income" ,"--------------------" ," $-1" ] ,"accounts report with account pattern a" ~: defreportopts{patterns_=["a"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $-1 income:salary" ," $1 liabilities:debts" ,"--------------------" ," $-1" ] ,"accounts report with account pattern e" ~: defreportopts{patterns_=["e"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," 0" ] ,"accounts report with unmatched parent of two matched subaccounts" ~: defreportopts{patterns_=["cash","saving"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,"accounts report with multi-part account name" ~: defreportopts{patterns_=["expenses:food"]} `gives` [" $1 expenses:food" ,"--------------------" ," $1" ] ,"accounts report with negative account pattern" ~: defreportopts{patterns_=["not:assets"]} `gives` [" $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," $1" ] ,"accounts report negative account pattern always matches full name" ~: defreportopts{patterns_=["not:e"]} `gives` ["--------------------" ," 0" ] ,"accounts report negative patterns affect totals" ~: defreportopts{patterns_=["expenses","not:food"]} `gives` [" $1 expenses:supplies" ,"--------------------" ," $1" ] ,"accounts report with -E shows zero-balance accounts" ~: defreportopts{patterns_=["assets"],empty_=True} `gives` [" $-1 assets" ," $1 bank" ," 0 checking" ," $1 saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,"accounts report with cost basis" ~: do j <- (readJournal Nothing Nothing Nothing $ unlines ["" ,"2008/1/1 test " ," a:b 10h @ $50" ," c:d " ]) >>= either error' return let j' = journalCanonicaliseAmounts $ journalConvertAmountsToCost j -- enable cost basis adjustment balanceReportAsText defreportopts (balanceReport defreportopts Any j') `is` [" $500 a:b" ," $-500 c:d" ,"--------------------" ," 0" ] -} ] Right samplejournal2 = journalBalanceTransactions False nulljournal{ jtxns = [ txnTieKnot Transaction{ tindex=0, tsourcepos=nullsourcepos, tdate=parsedate "2008/01/01", tdate2=Just $ parsedate "2009/01/01", tstatus=Uncleared, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= [posting {paccount="assets:bank:checking", pamount=Mixed [usd 1]} ,posting {paccount="income:salary", pamount=missingmixedamt} ], tpreceding_comment_lines="" } ] } -- tests_isInterestingIndented = [ -- "isInterestingIndented" ~: do -- let (opts, journal, acctname) `gives` r = isInterestingIndented opts l acctname `is` r -- where l = ledgerFromJournal (queryFromOpts nulldate opts) journal -- 
(defreportopts, samplejournal, "expenses") `gives` True -- ] tests_Hledger_Reports_BalanceReport :: Test tests_Hledger_Reports_BalanceReport = TestList tests_balanceReport hledger-lib-1.2/Hledger/Reports/EntriesReport.hs0000644000000000000000000000263613035210046020072 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Journal entries report, used by the print command. -} module Hledger.Reports.EntriesReport ( EntriesReport, EntriesReportItem, entriesReport, -- * Tests tests_Hledger_Reports_EntriesReport ) where import Data.List import Data.Ord import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions -- | A journal entries report is a list of whole transactions as -- originally entered in the journal (mostly). This is used by eg -- hledger's print command and hledger-web's journal entries view. type EntriesReport = [EntriesReportItem] type EntriesReportItem = Transaction -- | Select transactions for an entries report. entriesReport :: ReportOpts -> Query -> Journal -> EntriesReport entriesReport opts q j = sortBy (comparing date) $ filter (q `matchesTransaction`) ts where date = transactionDateFn opts ts = jtxns $ journalSelectingAmountFromOpts opts j tests_entriesReport :: [Test] tests_entriesReport = [ "entriesReport" ~: do assertEqual "not acct" 1 (length $ entriesReport defreportopts (Not $ Acct "bank") samplejournal) let sp = mkdatespan "2008/06/01" "2008/07/01" assertEqual "date" 3 (length $ entriesReport defreportopts (Date sp) samplejournal) ] tests_Hledger_Reports_EntriesReport :: Test tests_Hledger_Reports_EntriesReport = TestList $ tests_entriesReport hledger-lib-1.2/Hledger/Reports/MultiBalanceReports.hs0000644000000000000000000002371013067565677021234 0ustar0000000000000000{-# LANGUAGE FlexibleInstances, ScopedTypeVariables #-} {-| Multi-column balance reports, used by the balance command. -} module Hledger.Reports.MultiBalanceReports ( MultiBalanceReport(..), MultiBalanceReportRow, multiBalanceReport, multiBalanceReportValue, singleBalanceReport -- -- * Tests -- tests_Hledger_Reports_MultiBalanceReport ) where import Data.List import Data.Maybe import Data.Ord import Data.Time.Calendar import Safe -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions import Hledger.Reports.BalanceReport -- | A multi balance report is a balance report with one or more columns. It has: -- -- 1. a list of each column's period (date span) -- -- 2. a list of row items, each containing: -- -- * the full account name -- -- * the leaf account name -- -- * the account's depth -- -- * the amounts to show in each column -- -- * the total of the row's amounts -- -- * the average of the row's amounts -- -- 3. the column totals and the overall total and average -- -- The meaning of the amounts depends on the type of multi balance -- report, of which there are three: periodic, cumulative and historical -- (see 'BalanceType' and "Hledger.Cli.Balance"). 
newtype MultiBalanceReport = MultiBalanceReport ([DateSpan] ,[MultiBalanceReportRow] ,MultiBalanceReportTotals ) type MultiBalanceReportRow = (AccountName, AccountName, Int, [MixedAmount], MixedAmount, MixedAmount) type MultiBalanceReportTotals = ([MixedAmount], MixedAmount, MixedAmount) instance Show MultiBalanceReport where -- use ppShow to break long lists onto multiple lines -- we add some bogus extra shows here to help ppShow parse the output -- and wrap tuples and lists properly show (MultiBalanceReport (spans, items, totals)) = "MultiBalanceReport (ignore extra quotes):\n" ++ ppShow (show spans, map show items, totals) -- type alias just to remind us which AccountNames might be depth-clipped, below. type ClippedAccountName = AccountName -- | Generates a single column BalanceReport like balanceReport, but uses -- multiBalanceReport, so supports --historical. -- TODO Does not support boring parent eliding or --flat yet. singleBalanceReport :: ReportOpts -> Query -> Journal -> BalanceReport singleBalanceReport opts q j = (rows', total) where MultiBalanceReport (_, rows, (totals, _, _)) = multiBalanceReport opts q j rows' = [(a ,if flat_ opts then a else a' -- BalanceReport expects full account name here with --flat ,if tree_ opts then d-1 else 0 -- BalanceReport uses 0-based account depths , headDef nullmixedamt amts -- 0 columns is illegal, should not happen, return zeroes if it does ) | (a,a',d, amts, _, _) <- rows] total = headDef nullmixedamt totals -- | Generate a multicolumn balance report for the matched accounts, -- showing the change of balance, accumulated balance, or historical balance -- in each of the specified periods. multiBalanceReport :: ReportOpts -> Query -> Journal -> MultiBalanceReport multiBalanceReport opts q j = MultiBalanceReport (displayspans, items, totalsrow) where symq = dbg1 "symq" $ filterQuery queryIsSym $ dbg1 "requested q" q depthq = dbg1 "depthq" $ filterQuery queryIsDepth q depth = queryDepth depthq depthless = dbg1 "depthless" . filterQuery (not . queryIsDepth) datelessq = dbg1 "datelessq" $ filterQuery (not . 
queryIsDateOrDate2) q dateqcons = if date2_ opts then Date2 else Date precedingq = dbg1 "precedingq" $ And [datelessq, dateqcons $ DateSpan Nothing (spanStart reportspan)] requestedspan = dbg1 "requestedspan" $ queryDateSpan (date2_ opts) q -- span specified by -b/-e/-p options and query args requestedspan' = dbg1 "requestedspan'" $ requestedspan `spanDefaultsFrom` journalDateSpan (date2_ opts) j -- if open-ended, close it using the journal's end dates intervalspans = dbg1 "intervalspans" $ splitSpan (interval_ opts) requestedspan' -- interval spans enclosing it reportspan = dbg1 "reportspan" $ DateSpan (maybe Nothing spanStart $ headMay intervalspans) -- the requested span enlarged to a whole number of intervals (maybe Nothing spanEnd $ lastMay intervalspans) newdatesq = dbg1 "newdateq" $ dateqcons reportspan reportq = dbg1 "reportq" $ depthless $ And [datelessq, newdatesq] -- user's query enlarged to whole intervals and with no depth limit ps :: [Posting] = dbg1 "ps" $ journalPostings $ filterJournalAmounts symq $ -- remove amount parts excluded by cur: filterJournalPostings reportq $ -- remove postings not matched by (adjusted) query journalSelectingAmountFromOpts opts j displayspans = dbg1 "displayspans" $ splitSpan (interval_ opts) displayspan where displayspan | empty_ opts = dbg1 "displayspan (-E)" reportspan -- all the requested intervals | otherwise = dbg1 "displayspan" $ requestedspan `spanIntersect` matchedspan -- exclude leading/trailing empty intervals matchedspan = dbg1 "matchedspan" $ postingsDateSpan' (whichDateFromOpts opts) ps psPerSpan :: [[Posting]] = dbg1 "psPerSpan" [filter (isPostingInDateSpan' (whichDateFromOpts opts) s) ps | s <- displayspans] postedAcctBalChangesPerSpan :: [[(ClippedAccountName, MixedAmount)]] = dbg1 "postedAcctBalChangesPerSpan" $ map postingAcctBals psPerSpan where postingAcctBals :: [Posting] -> [(ClippedAccountName, MixedAmount)] postingAcctBals ps = [(aname a, (if tree_ opts then aibalance else aebalance) a) | a <- as] where as = depthLimit $ (if tree_ opts then id else filter ((>0).anumpostings)) $ drop 1 $ accountsFromPostings ps depthLimit | tree_ opts = filter ((depthq `matchesAccount`).aname) -- exclude deeper balances | otherwise = clipAccountsAndAggregate depth -- aggregate deeper balances at the depth limit postedAccts :: [AccountName] = dbg1 "postedAccts" $ sort $ accountNamesFromPostings ps -- starting balances and accounts from transactions before the report start date startacctbals = dbg1 "startacctbals" $ map (\(a,_,_,b) -> (a,b)) startbalanceitems where (startbalanceitems,_) = dbg1 "starting balance report" $ balanceReport opts' precedingq j where opts' | tree_ opts = opts{no_elide_=True} | otherwise = opts{accountlistmode_=ALFlat} startingBalanceFor a = fromMaybe nullmixedamt $ lookup a startacctbals startAccts = dbg1 "startAccts" $ map fst startacctbals displayedAccts :: [ClippedAccountName] = dbg1 "displayedAccts" $ (if tree_ opts then expandAccountNames else id) $ nub $ map (clipOrEllipsifyAccountName depth) $ if empty_ opts then nub $ sort $ startAccts ++ postedAccts else postedAccts acctBalChangesPerSpan :: [[(ClippedAccountName, MixedAmount)]] = dbg1 "acctBalChangesPerSpan" [sortBy (comparing fst) $ unionBy (\(a,_) (a',_) -> a == a') postedacctbals zeroes | postedacctbals <- postedAcctBalChangesPerSpan] where zeroes = [(a, nullmixedamt) | a <- displayedAccts] acctBalChanges :: [(ClippedAccountName, [MixedAmount])] = dbg1 "acctBalChanges" [(a, map snd abs) | abs@((a,_):_) <- transpose acctBalChangesPerSpan] -- never null, 
or used when null... items :: [MultiBalanceReportRow] = dbg1 "items" [(a, accountLeafName a, accountNameLevel a, displayedBals, rowtot, rowavg) | (a,changes) <- acctBalChanges , let displayedBals = case balancetype_ opts of HistoricalBalance -> drop 1 $ scanl (+) (startingBalanceFor a) changes CumulativeChange -> drop 1 $ scanl (+) nullmixedamt changes _ -> changes , let rowtot = sum displayedBals , let rowavg = averageMixedAmounts displayedBals , empty_ opts || depth == 0 || any (not . isZeroMixedAmount) displayedBals ] totals :: [MixedAmount] = -- dbg1 "totals" $ map sum balsbycol where balsbycol = transpose [bs | (a,_,_,bs,_,_) <- items, not (tree_ opts) || a `elem` highestlevelaccts] highestlevelaccts = dbg1 "highestlevelaccts" [a | a <- displayedAccts, not $ any (`elem` displayedAccts) $ init $ expandAccountName a] totalsrow :: MultiBalanceReportTotals = dbg1 "totalsrow" (totals, sum totals, averageMixedAmounts totals) dbg1 s = let p = "multiBalanceReport" in Hledger.Utils.dbg1 (p++" "++s) -- add prefix in this function's debug output -- dbg1 = const id -- exclude this function from debug output -- | Convert all the amounts in a multi-column balance report to their -- value on the given date in their default valuation commodities -- (which are determined as of that date, not the report interval dates). multiBalanceReportValue :: Journal -> Day -> MultiBalanceReport -> MultiBalanceReport multiBalanceReportValue j d r = r' where MultiBalanceReport (spans, rows, (coltotals, rowtotaltotal, rowavgtotal)) = r r' = MultiBalanceReport (spans, [(acct, acct', depth, map convert rowamts, convert rowtotal, convert rowavg) | (acct, acct', depth, rowamts, rowtotal, rowavg) <- rows], (map convert coltotals, convert rowtotaltotal, convert rowavgtotal)) convert = mixedAmountValue j d hledger-lib-1.2/Hledger/Reports/PostingsReport.hs0000644000000000000000000005641613035210046020274 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances, TupleSections, OverloadedStrings #-} {-| Postings report, used by the register command. -} module Hledger.Reports.PostingsReport ( PostingsReport, PostingsReportItem, postingsReport, mkpostingsReportItem, -- * Tests tests_Hledger_Reports_PostingsReport ) where import Data.List import Data.Maybe import Data.Ord (comparing) -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Safe (headMay, lastMay) import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions -- | A postings report is a list of postings with a running total, a label -- for the total field, and a little extra transaction info to help with rendering. -- This is used eg for the register command. type PostingsReport = (String -- label for the running balance column XXX remove ,[PostingsReportItem] -- line items, one per posting ) type PostingsReportItem = (Maybe Day -- The posting date, if this is the first posting in a -- transaction or if it's different from the previous -- posting's date. Or if this a summary posting, the -- report interval's start date if this is the first -- summary posting in the interval. ,Maybe Day -- If this is a summary posting, the report interval's -- end date if this is the first summary posting in -- the interval. ,Maybe String -- The posting's transaction's description, if this is the first posting in the transaction. ,Posting -- The posting, possibly with the account name depth-clipped. 
,MixedAmount -- The running total after this posting, or with --average, -- the running average posting amount. With --historical, -- postings before the report start date are included in -- the running total/average. ) -- | Select postings from the journal and add running balance and other -- information to make a postings report. Used by eg hledger's register command. postingsReport :: ReportOpts -> Query -> Journal -> PostingsReport postingsReport opts q j = (totallabel, items) where reportspan = adjustReportDates opts q j whichdate = whichDateFromOpts opts depth = queryDepth q -- postings to be included in the report, and similarly-matched postings before the report start date (precedingps, reportps) = matchedPostingsBeforeAndDuring opts q j reportspan -- postings or pseudo postings to be displayed displayps | interval == NoInterval = map (,Nothing) reportps | otherwise = summarisePostingsByInterval interval whichdate depth showempty reportspan reportps where interval = interval_ opts -- XXX showempty = empty_ opts || average_ opts -- posting report items ready for display items = dbg1 "postingsReport items" $ postingsReportItems displayps (nullposting,Nothing) whichdate depth startbal runningcalc startnum where historical = balancetype_ opts == HistoricalBalance precedingsum = sumPostings precedingps precedingavg | null precedingps = 0 | otherwise = precedingsum `divideMixedAmount` (fromIntegral $ length precedingps) startbal | average_ opts = if historical then precedingavg else 0 | otherwise = if historical then precedingsum else 0 startnum = if historical then length precedingps + 1 else 1 runningcalc | average_ opts = \i avg amt -> avg + (amt - avg) `divideMixedAmount` (fromIntegral i) -- running average | otherwise = \_ bal amt -> bal + amt -- running total totallabel = "Total" -- | Adjust report start/end dates to more useful ones based on -- journal data and report intervals. Ie: -- 1. If the start date is unspecified, use the earliest date in the journal (if any) -- 2. If the end date is unspecified, use the latest date in the journal (if any) -- 3. If a report interval is specified, enlarge the dates to enclose whole intervals adjustReportDates :: ReportOpts -> Query -> Journal -> DateSpan adjustReportDates opts q j = reportspan where -- see also multiBalanceReport requestedspan = dbg1 "requestedspan" $ queryDateSpan' q -- span specified by -b/-e/-p options and query args journalspan = dbg1 "journalspan" $ dates `spanUnion` date2s -- earliest and latest dates (or date2s) in the journal where dates = journalDateSpan False j date2s = journalDateSpan True j requestedspanclosed = dbg1 "requestedspanclosed" $ requestedspan `spanDefaultsFrom` journalspan -- if open-ended, close it using the journal's dates (if any) intervalspans = dbg1 "intervalspans" $ splitSpan (interval_ opts) requestedspanclosed -- get the whole intervals enclosing that mreportstart = dbg1 "reportstart" $ maybe Nothing spanStart $ headMay intervalspans -- start of the first interval, or open ended mreportend = dbg1 "reportend" $ maybe Nothing spanEnd $ lastMay intervalspans -- end of the last interval, or open ended reportspan = dbg1 "reportspan" $ DateSpan mreportstart mreportend -- the requested span enlarged to whole intervals if possible -- | Find postings matching a given query, within a given date span, -- and also any similarly-matched postings before that date span. -- Date restrictions and depth restrictions in the query are ignored. -- A helper for the postings report. 
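--
-- The postings before the span are what postingsReport uses to seed the
-- running total/average (and the starting item number) when --historical
-- is in effect. Usage sketch, as in postingsReport above:
--
-- @
-- (precedingps, reportps) = matchedPostingsBeforeAndDuring opts q j reportspan
-- @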
matchedPostingsBeforeAndDuring :: ReportOpts -> Query -> Journal -> DateSpan -> ([Posting],[Posting]) matchedPostingsBeforeAndDuring opts q j (DateSpan mstart mend) = dbg1 "beforeps, duringps" $ span (beforestartq `matchesPosting`) beforeandduringps where beforestartq = dbg1 "beforestartq" $ dateqtype $ DateSpan Nothing mstart beforeandduringps = dbg1 "ps4" $ sortBy (comparing sortdate) $ -- sort postings by date or date2 dbg1 "ps3" $ map (filterPostingAmount symq) $ -- remove amount parts which the query's cur: terms would exclude dbg1 "ps2" $ (if related_ opts then concatMap relatedPostings else id) $ -- with -r, replace each with its sibling postings dbg1 "ps1" $ filter (beforeandduringq `matchesPosting`) $ -- filter postings by the query, with no start date or depth limit journalPostings $ journalSelectingAmountFromOpts opts j where beforeandduringq = dbg1 "beforeandduringq" $ And [depthless $ dateless q, beforeendq] where depthless = filterQuery (not . queryIsDepth) dateless = filterQuery (not . queryIsDateOrDate2) beforeendq = dateqtype $ DateSpan Nothing mend sortdate = if date2_ opts then postingDate2 else postingDate symq = dbg1 "symq" $ filterQuery queryIsSym q dateqtype | queryIsDate2 dateq || (queryIsDate dateq && date2_ opts) = Date2 | otherwise = Date where dateq = dbg1 "dateq" $ filterQuery queryIsDateOrDate2 $ dbg1 "q" q -- XXX confused by multiple date:/date2: ? -- | Generate postings report line items from a list of postings or (with -- non-Nothing dates attached) summary postings. postingsReportItems :: [(Posting,Maybe Day)] -> (Posting,Maybe Day) -> WhichDate -> Int -> MixedAmount -> (Int -> MixedAmount -> MixedAmount -> MixedAmount) -> Int -> [PostingsReportItem] postingsReportItems [] _ _ _ _ _ _ = [] postingsReportItems ((p,menddate):ps) (pprev,menddateprev) wd d b runningcalcfn itemnum = i:(postingsReportItems ps (p,menddate) wd d b' runningcalcfn (itemnum+1)) where i = mkpostingsReportItem showdate showdesc wd menddate p' b' (showdate, showdesc) | isJust menddate = (menddate /= menddateprev, False) | otherwise = (isfirstintxn || isdifferentdate, isfirstintxn) isfirstintxn = ptransaction p /= ptransaction pprev isdifferentdate = case wd of PrimaryDate -> postingDate p /= postingDate pprev SecondaryDate -> postingDate2 p /= postingDate2 pprev p' = p{paccount= clipOrEllipsifyAccountName d $ paccount p} b' = runningcalcfn itemnum b (pamount p) -- | Generate one postings report line item, containing the posting, -- the current running balance, and optionally the posting date and/or -- the transaction description. mkpostingsReportItem :: Bool -> Bool -> WhichDate -> Maybe Day -> Posting -> MixedAmount -> PostingsReportItem mkpostingsReportItem showdate showdesc wd menddate p b = (if showdate then Just date else Nothing ,menddate ,if showdesc then Just desc else Nothing ,p ,b ) where date = case wd of PrimaryDate -> postingDate p SecondaryDate -> postingDate2 p desc = T.unpack $ maybe "" tdescription $ ptransaction p -- | Convert a list of postings into summary postings, one per interval, -- aggregated to the specified depth if any. 
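--
-- Eg, to summarise postings by quarter using primary dates, with effectively
-- no depth limit (a sketch mirroring the test below; @reportspan@ and @ps@
-- are assumed to be in scope):
--
-- @
-- summarisePostingsByInterval (Quarters 1) PrimaryDate 99999 False reportspan ps
-- @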
summarisePostingsByInterval :: Interval -> WhichDate -> Int -> Bool -> DateSpan -> [Posting] -> [SummaryPosting] summarisePostingsByInterval interval wd depth showempty reportspan ps = concatMap summarisespan $ splitSpan interval reportspan where summarisespan s = summarisePostingsInDateSpan s wd depth showempty (postingsinspan s) postingsinspan s = filter (isPostingInDateSpan' wd s) ps tests_summarisePostingsByInterval = [ "summarisePostingsByInterval" ~: do summarisePostingsByInterval (Quarters 1) PrimaryDate 99999 False (DateSpan Nothing Nothing) [] ~?= [] ] -- | A summary posting summarises the activity in one account within a report -- interval. It is currently kludgily represented by a regular Posting with no -- description, the interval's start date stored as the posting date, and the -- interval's end date attached with a tuple. type SummaryPosting = (Posting, Maybe Day) -- | Given a date span (representing a report interval) and a list of -- postings within it, aggregate the postings into one summary posting per -- account. -- -- When a depth argument is present, postings to accounts of greater -- depth are also aggregated where possible. If the depth is 0, all -- postings in the span are aggregated into a single posting with -- account name "...". -- -- The showempty flag includes spans with no postings and also postings -- with 0 amount. -- summarisePostingsInDateSpan :: DateSpan -> WhichDate -> Int -> Bool -> [Posting] -> [SummaryPosting] summarisePostingsInDateSpan (DateSpan b e) wd depth showempty ps | null ps && (isNothing b || isNothing e) = [] | null ps && showempty = [(summaryp, Just e')] | otherwise = summarypes where postingdate = if wd == PrimaryDate then postingDate else postingDate2 b' = fromMaybe (maybe nulldate postingdate $ headMay ps) b e' = fromMaybe (maybe (addDays 1 nulldate) postingdate $ lastMay ps) e summaryp = nullposting{pdate=Just b'} clippedanames | depth > 0 = nub $ map (clipAccountName depth) anames | otherwise = ["..."] summaryps | depth > 0 = [summaryp{paccount=a,pamount=balance a} | a <- clippedanames] | otherwise = [summaryp{paccount="...",pamount=sum $ map pamount ps}] summarypes = map (, Just e') $ (if showempty then id else filter (not . isZeroMixedAmount . 
pamount)) summaryps anames = sort $ nub $ map paccount ps -- aggregate balances by account, like ledgerFromJournal, then do depth-clipping accts = accountsFromPostings ps balance a = maybe nullmixedamt bal $ lookupAccount a accts where bal = if isclipped a then aibalance else aebalance isclipped a = accountNameLevel a >= depth -- tests_summarisePostingsInDateSpan = [ -- "summarisePostingsInDateSpan" ~: do -- let gives (b,e,depth,showempty,ps) = -- (summarisePostingsInDateSpan (mkdatespan b e) depth showempty ps `is`) -- let ps = -- [ -- nullposting{lpdescription="desc",lpaccount="expenses:food:groceries",lpamount=Mixed [usd 1]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=Mixed [usd 2]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food", lpamount=Mixed [usd 4]} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=Mixed [usd 8]} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,[]) `gives` -- [] -- ("2008/01/01","2009/01/01",0,9999,True,[]) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31"} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food", lpamount=Mixed [usd 4]} -- ,nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food:dining", lpamount=Mixed [usd 10]} -- ,nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food:groceries",lpamount=Mixed [usd 1]} -- ] -- ("2008/01/01","2009/01/01",0,2,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses:food",lpamount=Mixed [usd 15]} -- ] -- ("2008/01/01","2009/01/01",0,1,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="expenses",lpamount=Mixed [usd 15]} -- ] -- ("2008/01/01","2009/01/01",0,0,False,ts) `gives` -- [ -- nullposting{lpdate=parsedate "2008/01/01",lpdescription="- 2008/12/31",lpaccount="",lpamount=Mixed [usd 15]} -- ] tests_postingsReport = [ "postingsReport" ~: do -- with the query specified explicitly let (query, journal) `gives` n = (length $ snd $ postingsReport defreportopts query journal) `is` n (Any, nulljournal) `gives` 0 (Any, samplejournal) `gives` 11 -- register --depth just clips account names (Depth 2, samplejournal) `gives` 11 (And [Depth 1, Status Cleared, Acct "expenses"], samplejournal) `gives` 2 (And [And [Depth 1, Status Cleared], Acct "expenses"], samplejournal) `gives` 2 -- with query and/or command-line options assertEqual "" 11 (length $ snd $ postingsReport defreportopts Any samplejournal) assertEqual "" 9 (length $ snd $ postingsReport defreportopts{interval_=Months 1} Any samplejournal) assertEqual "" 19 (length $ snd $ postingsReport defreportopts{interval_=Months 1, empty_=True} Any samplejournal) assertEqual "" 4 (length $ snd $ postingsReport defreportopts (Acct "assets:bank:checking") samplejournal) -- (defreportopts, And [Acct "a a", Acct "'b"], samplejournal2) `gives` 0 -- [(Just (parsedate "2008-01-01","income"),assets:bank:checking $1,$1) -- ,(Nothing,income:salary $-1,0) -- ,(Just (2008-06-01,"gift"),assets:bank:checking $1,$1) -- ,(Nothing,income:gifts $-1,0) -- ,(Just (2008-06-02,"save"),assets:bank:saving $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) -- ,(Just (2008-06-03,"eat & shop"),expenses:food $1,$1) -- ,(Nothing,expenses:supplies $1,$2) -- ,(Nothing,assets:cash $-2,0) -- 
,(Just (2008-12-31,"pay off"),liabilities:debts $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) -- ] {- let opts = defreportopts (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with cleared option" ~: do let opts = defreportopts{cleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with uncleared option" ~: do let opts = defreportopts{uncleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report sorts by date" ~: do j <- readJournal' $ unlines ["2008/02/02 a" ," b 1" ," c" ,"" ,"2008/01/01 d" ," e 1" ," f" ] let opts = defreportopts registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/02/02"] ,"postings report with account pattern" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cash"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with account pattern, case insensitive" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cAsH"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with display expression" ~: do j <- samplejournal let gives displayexpr = (registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is`) where opts = defreportopts{display_=Just displayexpr} "d<[2008/6/2]" `gives` ["2008/01/01","2008/06/01"] "d<=[2008/6/2]" `gives` ["2008/01/01","2008/06/01","2008/06/02"] "d=[2008/6/2]" `gives` ["2008/06/02"] "d>=[2008/6/2]" `gives` ["2008/06/02","2008/06/03","2008/12/31"] "d>[2008/6/2]" `gives` ["2008/06/03","2008/12/31"] ,"postings report with period expression" ~: do j <- samplejournal let periodexpr `gives` dates = do j' <- samplejournal registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j') `is` dates where opts = defreportopts{period_=maybePeriod date1 periodexpr} "" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2008" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2007" `gives` [] "june" `gives` ["2008/06/01","2008/06/02","2008/06/03"] "monthly" `gives` ["2008/01/01","2008/06/01","2008/12/01"] "quarterly" `gives` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=maybePeriod date1 "yearly"} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines 
["2008/01/01 - 2008/12/31 assets:bank:saving $1 $1" ," assets:cash $-2 $-1" ," expenses:food $1 0" ," expenses:supplies $1 $1" ," income:gifts $-1 0" ," income:salary $-1 $-1" ," liabilities:debts $1 0" ] let opts = defreportopts{period_=maybePeriod date1 "quarterly"} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=maybePeriod date1 "quarterly",empty_=True} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/07/01","2008/10/01"] ] , "postings report with depth arg" ~: do j <- samplejournal let opts = defreportopts{depth_=Just 2} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank $1 $1" ," assets:bank $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank $-1 0" ] -} ] tests_Hledger_Reports_PostingsReport :: Test tests_Hledger_Reports_PostingsReport = TestList $ tests_summarisePostingsByInterval ++ tests_postingsReport hledger-lib-1.2/Hledger/Reports/ReportOptions.hs0000644000000000000000000003550513066774455020144 0ustar0000000000000000{-# LANGUAGE CPP, RecordWildCards, DeriveDataTypeable #-} {-| Options common to most hledger reports. -} module Hledger.Reports.ReportOptions ( ReportOpts(..), BalanceType(..), AccountListMode(..), FormatStr, defreportopts, rawOptsToReportOpts, checkReportOpts, flat_, tree_, whichDateFromOpts, journalSelectingAmountFromOpts, queryFromOpts, queryFromOptsOnly, queryOptsFromOpts, transactionDateFn, postingDateFn, reportStartDate, reportEndDate, reportStartEndDates, tests_Hledger_Reports_ReportOptions ) where import Data.Data (Data) #if !MIN_VERSION_base(4,8,0) import Data.Functor.Compat ((<$>)) #endif import Data.Maybe import qualified Data.Text as T import Data.Typeable (Typeable) import Data.Time.Calendar import Data.Default import Safe import Test.HUnit import Text.Megaparsec.Error import Hledger.Data import Hledger.Query import Hledger.Utils type FormatStr = String -- | Which "balance" is being shown in a balance report. data BalanceType = PeriodChange -- ^ The change of balance in each period. | CumulativeChange -- ^ The accumulated change across multiple periods. | HistoricalBalance -- ^ The historical ending balance, including the effect of -- all postings before the report period. Unless altered by, -- a query, this is what you would see on a bank statement. deriving (Eq,Show,Data,Typeable) instance Default BalanceType where def = PeriodChange -- | Should accounts be displayed: in the command's default style, hierarchically, or as a flat list ? data AccountListMode = ALDefault | ALTree | ALFlat deriving (Eq, Show, Data, Typeable) instance Default AccountListMode where def = ALDefault -- | Standard options for customising report filtering and output, -- corresponding to hledger's command-line options and query language -- arguments. Used in hledger-lib and above. 
data ReportOpts = ReportOpts { period_ :: Period ,interval_ :: Interval ,clearedstatus_ :: Maybe ClearedStatus ,cost_ :: Bool ,depth_ :: Maybe Int ,display_ :: Maybe DisplayExp ,date2_ :: Bool ,empty_ :: Bool ,no_elide_ :: Bool ,real_ :: Bool ,format_ :: Maybe FormatStr ,query_ :: String -- all arguments, as a string -- register only ,average_ :: Bool ,related_ :: Bool -- balance only ,balancetype_ :: BalanceType ,accountlistmode_ :: AccountListMode ,drop_ :: Int ,row_total_ :: Bool ,no_total_ :: Bool ,value_ :: Bool ,pretty_tables_ :: Bool } deriving (Show, Data, Typeable) instance Default ReportOpts where def = defreportopts instance Default Bool where def = False defreportopts :: ReportOpts defreportopts = ReportOpts def def def def def def def def def def def def def def def def def def def def def rawOptsToReportOpts :: RawOpts -> IO ReportOpts rawOptsToReportOpts rawopts = checkReportOpts <$> do d <- getCurrentDay let rawopts' = checkRawOpts rawopts return defreportopts{ period_ = periodFromRawOpts d rawopts' ,interval_ = intervalFromRawOpts rawopts' ,clearedstatus_ = clearedStatusFromRawOpts rawopts' ,cost_ = boolopt "cost" rawopts' ,depth_ = maybeintopt "depth" rawopts' ,display_ = maybedisplayopt d rawopts' ,date2_ = boolopt "date2" rawopts' ,empty_ = boolopt "empty" rawopts' ,no_elide_ = boolopt "no-elide" rawopts' ,real_ = boolopt "real" rawopts' ,format_ = maybestringopt "format" rawopts' -- XXX move to CliOpts or move validation from Cli.CliOptions to here ,query_ = unwords $ listofstringopt "args" rawopts' -- doesn't handle an arg like "" right ,average_ = boolopt "average" rawopts' ,related_ = boolopt "related" rawopts' ,balancetype_ = balancetypeopt rawopts' ,accountlistmode_ = accountlistmodeopt rawopts' ,drop_ = intopt "drop" rawopts' ,row_total_ = boolopt "row-total" rawopts' ,no_total_ = boolopt "no-total" rawopts' ,value_ = boolopt "value" rawopts' ,pretty_tables_ = boolopt "pretty-tables" rawopts' } -- | Do extra validation of raw option values, raising an error if there's a problem. checkRawOpts :: RawOpts -> RawOpts checkRawOpts rawopts -- our standard behaviour is to accept conflicting options actually, -- using the last one - more forgiving for overriding command-line aliases -- | countopts ["change","cumulative","historical"] > 1 -- = usageError "please specify at most one of --change, --cumulative, --historical" -- | countopts ["flat","tree"] > 1 -- = usageError "please specify at most one of --flat, --tree" -- | countopts ["daily","weekly","monthly","quarterly","yearly"] > 1 -- = usageError "please specify at most one of --daily, " | otherwise = rawopts -- where -- countopts = length . filter (`boolopt` rawopts) -- | Do extra validation of report options, raising an error if there's a problem. 
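-- Report options are usually built by updating the defaults with record
-- syntax, as the tests in this package do; a minimal sketch (the name and
-- field values here are arbitrary):
--
--   monthlyOpts :: ReportOpts
--   monthlyOpts = defreportopts{interval_ = Months 1, empty_ = True, depth_ = Just 2}
--
-- Only the fields being changed need to be mentioned; everything else
-- keeps its Default-derived value.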
checkReportOpts :: ReportOpts -> ReportOpts checkReportOpts ropts@ReportOpts{..} = either usageError (const ropts) $ do case depth_ of Just d | d < 0 -> Left "--depth should have a positive number" _ -> Right () accountlistmodeopt :: RawOpts -> AccountListMode accountlistmodeopt rawopts = case reverse $ filter (`elem` ["tree","flat"]) $ map fst rawopts of ("tree":_) -> ALTree ("flat":_) -> ALFlat _ -> ALDefault balancetypeopt :: RawOpts -> BalanceType balancetypeopt rawopts = case reverse $ filter (`elem` ["change","cumulative","historical"]) $ map fst rawopts of ("historical":_) -> HistoricalBalance ("cumulative":_) -> CumulativeChange _ -> PeriodChange -- Get the period specified by the intersection of -b/--begin, -e/--end and/or -- -p/--period options, using the given date to interpret relative date expressions. periodFromRawOpts :: Day -> RawOpts -> Period periodFromRawOpts d rawopts = case (mearliestb, mlateste) of (Nothing, Nothing) -> PeriodAll (Just b, Nothing) -> PeriodFrom b (Nothing, Just e) -> PeriodTo e (Just b, Just e) -> simplifyPeriod $ PeriodBetween b e where mearliestb = case beginDatesFromRawOpts d rawopts of [] -> Nothing bs -> Just $ minimum bs mlateste = case endDatesFromRawOpts d rawopts of [] -> Nothing es -> Just $ maximum es -- Get all begin dates specified by -b/--begin or -p/--period options, in order, -- using the given date to interpret relative date expressions. beginDatesFromRawOpts :: Day -> RawOpts -> [Day] beginDatesFromRawOpts d = catMaybes . map (begindatefromrawopt d) where begindatefromrawopt d (n,v) | n == "begin" = either (\e -> usageError $ "could not parse "++n++" date: "++parseErrorPretty e) Just $ fixSmartDateStrEither' d (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) id $ parsePeriodExpr d (stripquotes $ T.pack v) of (_, DateSpan (Just b) _) -> Just b _ -> Nothing | otherwise = Nothing -- Get all end dates specified by -e/--end or -p/--period options, in order, -- using the given date to interpret relative date expressions. endDatesFromRawOpts :: Day -> RawOpts -> [Day] endDatesFromRawOpts d = catMaybes . map (enddatefromrawopt d) where enddatefromrawopt d (n,v) | n == "end" = either (\e -> usageError $ "could not parse "++n++" date: "++parseErrorPretty e) Just $ fixSmartDateStrEither' d (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) id $ parsePeriodExpr d (stripquotes $ T.pack v) of (_, DateSpan _ (Just e)) -> Just e _ -> Nothing | otherwise = Nothing -- | Get the report interval, if any, specified by the last of -p/--period, -- -D/--daily, -W/--weekly, -M/--monthly etc. options. intervalFromRawOpts :: RawOpts -> Interval intervalFromRawOpts = lastDef NoInterval . catMaybes . map intervalfromrawopt where intervalfromrawopt (n,v) | n == "period" = either (\e -> usageError $ "could not parse period option: "++parseErrorPretty e) (Just . fst) $ parsePeriodExpr nulldate (stripquotes $ T.pack v) -- reference date does not affect the interval | n == "daily" = Just $ Days 1 | n == "weekly" = Just $ Weeks 1 | n == "monthly" = Just $ Months 1 | n == "quarterly" = Just $ Quarters 1 | n == "yearly" = Just $ Years 1 | otherwise = Nothing -- | Get the cleared status, if any, specified by the last of -C/--cleared, -- --pending, -U/--uncleared options. clearedStatusFromRawOpts :: RawOpts -> Maybe ClearedStatus clearedStatusFromRawOpts = lastMay . catMaybes . 
map clearedstatusfromrawopt where clearedstatusfromrawopt (n,_) | n == "cleared" = Just Cleared | n == "pending" = Just Pending | n == "uncleared" = Just Uncleared | otherwise = Nothing type DisplayExp = String maybedisplayopt :: Day -> RawOpts -> Maybe DisplayExp maybedisplayopt d rawopts = maybe Nothing (Just . regexReplaceBy "\\[.+?\\]" fixbracketeddatestr) $ maybestringopt "display" rawopts where fixbracketeddatestr "" = "" fixbracketeddatestr s = "[" ++ fixSmartDateStr d (T.pack $ init $ tail s) ++ "]" -- | Select the Transaction date accessor based on --date2. transactionDateFn :: ReportOpts -> (Transaction -> Day) transactionDateFn ReportOpts{..} = if date2_ then transactionDate2 else tdate -- | Select the Posting date accessor based on --date2. postingDateFn :: ReportOpts -> (Posting -> Day) postingDateFn ReportOpts{..} = if date2_ then postingDate2 else postingDate -- | Report which date we will report on based on --date2. whichDateFromOpts :: ReportOpts -> WhichDate whichDateFromOpts ReportOpts{..} = if date2_ then SecondaryDate else PrimaryDate -- | Legacy-compatible convenience aliases for accountlistmode_. tree_ :: ReportOpts -> Bool tree_ = (==ALTree) . accountlistmode_ flat_ :: ReportOpts -> Bool flat_ = (==ALFlat) . accountlistmode_ -- depthFromOpts :: ReportOpts -> Int -- depthFromOpts opts = min (fromMaybe 99999 $ depth_ opts) (queryDepth $ queryFromOpts nulldate opts) -- | Convert this journal's postings' amounts to the cost basis amounts if -- specified by options. journalSelectingAmountFromOpts :: ReportOpts -> Journal -> Journal journalSelectingAmountFromOpts opts | cost_ opts = journalConvertAmountsToCost | otherwise = id -- | Convert report options and arguments to a query. queryFromOpts :: Day -> ReportOpts -> Query queryFromOpts d ReportOpts{..} = simplifyQuery $ And $ [flagsq, argsq] where flagsq = And $ [(if date2_ then Date2 else Date) $ periodAsDateSpan period_] ++ (if real_ then [Real True] else []) ++ (if empty_ then [Empty True] else []) -- ? ++ (maybe [] ((:[]) . Status) clearedstatus_) ++ (maybe [] ((:[]) . Depth) depth_) argsq = fst $ parseQuery d (T.pack query_) -- | Convert report options to a query, ignoring any non-flag command line arguments. queryFromOptsOnly :: Day -> ReportOpts -> Query queryFromOptsOnly _d ReportOpts{..} = simplifyQuery flagsq where flagsq = And $ [(if date2_ then Date2 else Date) $ periodAsDateSpan period_] ++ (if real_ then [Real True] else []) ++ (if empty_ then [Empty True] else []) -- ? ++ (maybe [] ((:[]) . Status) clearedstatus_) ++ (maybe [] ((:[]) . Depth) depth_) tests_queryFromOpts :: [Test] tests_queryFromOpts = [ "queryFromOpts" ~: do assertEqual "" Any (queryFromOpts nulldate defreportopts) assertEqual "" (Acct "a") (queryFromOpts nulldate defreportopts{query_="a"}) assertEqual "" (Desc "a a") (queryFromOpts nulldate defreportopts{query_="desc:'a a'"}) assertEqual "" (Date $ mkdatespan "2012/01/01" "2013/01/01") (queryFromOpts nulldate defreportopts{period_=PeriodFrom (parsedate "2012/01/01") ,query_="date:'to 2013'" }) assertEqual "" (Date2 $ mkdatespan "2012/01/01" "2013/01/01") (queryFromOpts nulldate defreportopts{query_="date2:'in 2012'"}) assertEqual "" (Or [Acct "a a", Acct "'b"]) (queryFromOpts nulldate defreportopts{query_="'a a' 'b"}) ] -- | Convert report options and arguments to query options. 
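-- For example (roughly, following the tests above): with
--   defreportopts{period_ = PeriodFrom (parsedate "2008/01/01"), query_ = "food"}
-- queryFromOpts combines the flag-derived and argument-derived parts,
-- giving a query along the lines of
--   And [Date (DateSpan (Just 2008-01-01) Nothing), Acct "food"]
-- (after simplifyQuery), whereas queryFromOptsOnly drops the "food"
-- argument and keeps only the flag-derived Date query.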
queryOptsFromOpts :: Day -> ReportOpts -> [QueryOpt] queryOptsFromOpts d ReportOpts{..} = flagsqopts ++ argsqopts where flagsqopts = [] argsqopts = snd $ parseQuery d (T.pack query_) tests_queryOptsFromOpts :: [Test] tests_queryOptsFromOpts = [ "queryOptsFromOpts" ~: do assertEqual "" [] (queryOptsFromOpts nulldate defreportopts) assertEqual "" [] (queryOptsFromOpts nulldate defreportopts{query_="a"}) assertEqual "" [] (queryOptsFromOpts nulldate defreportopts{period_=PeriodFrom (parsedate "2012/01/01") ,query_="date:'to 2013'" }) ] -- | The effective report start date is the one specified by options or queries, -- otherwise the earliest transaction or posting date in the journal, -- otherwise (for an empty journal) nothing. -- Needs IO to parse smart dates in options/queries. reportStartDate :: Journal -> ReportOpts -> IO (Maybe Day) reportStartDate j ropts = (fst <$>) <$> reportStartEndDates j ropts -- | The effective report end date is the one specified by options or queries, -- otherwise the latest transaction or posting date in the journal, -- otherwise (for an empty journal) nothing. -- Needs IO to parse smart dates in options/queries. reportEndDate :: Journal -> ReportOpts -> IO (Maybe Day) reportEndDate j ropts = (snd <$>) <$> reportStartEndDates j ropts reportStartEndDates :: Journal -> ReportOpts -> IO (Maybe (Day,Day)) reportStartEndDates j ropts = do today <- getCurrentDay let q = queryFromOpts today ropts mrequestedstartdate = queryStartDate False q mrequestedenddate = queryEndDate False q return $ case journalDateSpan False j of -- don't bother with secondary dates DateSpan (Just journalstartdate) (Just journalenddate) -> Just (fromMaybe journalstartdate mrequestedstartdate, fromMaybe journalenddate mrequestedenddate) _ -> Nothing tests_Hledger_Reports_ReportOptions :: Test tests_Hledger_Reports_ReportOptions = TestList $ tests_queryFromOpts ++ tests_queryOptsFromOpts hledger-lib-1.2/Hledger/Reports/TransactionsReports.hs0000644000000000000000000003305413035210046021312 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveDataTypeable, FlexibleInstances #-} {-| Here are several variants of a transactions report. Transactions reports are like a postings report, but more transaction-oriented, and (in the account-centric variant) relative to a some base account. They are used by hledger-web. -} module Hledger.Reports.TransactionsReports ( TransactionsReport, TransactionsReportItem, AccountTransactionsReport, AccountTransactionsReportItem, triOrigTransaction, triDate, triAmount, triBalance, triCommodityAmount, triCommodityBalance, journalTransactionsReport, accountTransactionsReport, transactionsReportByCommodity, transactionRegisterDate -- -- * Tests -- tests_Hledger_Reports_TransactionsReports ) where import Data.List import Data.Ord -- import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar -- import Test.HUnit import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions import Hledger.Utils.Debug -- | A transactions report includes a list of transactions -- (posting-filtered and unfiltered variants), a running balance, and some -- other information helpful for rendering a register view (a flag -- indicating multiple other accounts and a display string describing -- them) with or without a notion of current account(s). -- Two kinds of report use this data structure, see journalTransactionsReport -- and accountTransactionsReport below for detais. 
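-- A rough usage sketch for the report date helpers above (illustrative;
-- "j" stands for some already-parsed Journal):
--
--   do mspan <- reportStartEndDates j defreportopts
--      case mspan of
--        Nothing     -> putStrLn "empty journal: no report dates"
--        Just (b, e) -> putStrLn $ "report runs " ++ show b ++ " to " ++ show e
--
-- With -b/--begin, -e/--end or date: queries the corresponding end of the
-- span comes from the options; otherwise it falls back to the journal's
-- own earliest/latest dates, and Nothing is returned only for an empty
-- journal.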
type TransactionsReport = (String -- label for the balance column, eg "balance" or "total" ,[TransactionsReportItem] -- line items, one per transaction ) type TransactionsReportItem = (Transaction -- the original journal transaction, unmodified ,Transaction -- the transaction as seen from a particular account, with postings maybe filtered ,Bool -- is this a split, ie more than one other account posting ,String -- a display string describing the other account(s), if any ,MixedAmount -- the amount posted to the current account(s) by the filtered postings (or total amount posted) ,MixedAmount -- the running total of item amounts, starting from zero; -- or with --historical, the running total including items -- (matched by the report query) preceding the report period ) triOrigTransaction (torig,_,_,_,_,_) = torig triDate (_,tacct,_,_,_,_) = tdate tacct triAmount (_,_,_,_,a,_) = a triBalance (_,_,_,_,_,a) = a triCommodityAmount c = filterMixedAmountByCommodity c . triAmount triCommodityBalance c = filterMixedAmountByCommodity c . triBalance ------------------------------------------------------------------------------- -- | Select transactions from the whole journal. This is similar to a -- "postingsReport" except with transaction-based report items which -- are ordered most recent first. XXX Or an EntriesReport - use that instead ? -- This is used by hledger-web's journal view. journalTransactionsReport :: ReportOpts -> Journal -> Query -> TransactionsReport journalTransactionsReport opts j q = (totallabel, items) where -- XXX items' first element should be the full transaction with all postings items = reverse $ accountTransactionsReportItems q None nullmixedamt id ts ts = sortBy (comparing date) $ filter (q `matchesTransaction`) $ jtxns $ journalSelectingAmountFromOpts opts j date = transactionDateFn opts ------------------------------------------------------------------------------- -- | An account transactions report represents transactions affecting -- a particular account (or possibly several accounts, but we don't -- use that). It is used eg by hledger-ui's and hledger-web's account -- register view, where we want to show one row per transaction, in -- the context of the current account. Report items consist of: -- -- - the transaction, unmodified -- -- - the transaction as seen in the context of the current account and query, -- which means: -- -- - the transaction date is set to the "transaction context date", -- which can be different from the transaction's general date: -- if postings to the current account (and matched by the report query) -- have their own dates, it's the earliest of these dates. -- -- - the transaction's postings are filtered, excluding any which are not -- matched by the report query -- -- - a text description of the other account(s) posted to/from -- -- - a flag indicating whether there's more than one other account involved -- -- - the total increase/decrease to the current account -- -- - the report transactions' running total after this transaction; -- or if historical balance is requested (-H), the historical running total. -- The historical running total includes transactions from before the -- report start date if one is specified, filtered by the report query. -- The historical running total may or may not be the account's historical -- running balance, depending on the report query. -- -- Items are sorted by transaction register date (the earliest date the transaction -- posts to the current account), most recent first. 
-- Reporting intervals are currently ignored. -- type AccountTransactionsReport = (String -- label for the balance column, eg "balance" or "total" ,[AccountTransactionsReportItem] -- line items, one per transaction ) type AccountTransactionsReportItem = ( Transaction -- the transaction, unmodified ,Transaction -- the transaction, as seen from the current account ,Bool -- is this a split (more than one posting to other accounts) ? ,String -- a display string describing the other account(s), if any ,MixedAmount -- the amount posted to the current account(s) (or total amount posted) ,MixedAmount -- the register's running total or the current account(s)'s historical balance, after this transaction ) accountTransactionsReport :: ReportOpts -> Journal -> Query -> Query -> AccountTransactionsReport accountTransactionsReport opts j reportq thisacctq = (label, items) where -- a depth limit does not affect the account transactions report -- seems unnecessary for some reason XXX reportq' = -- filterQuery (not . queryIsDepth) reportq -- get all transactions, with amounts converted to cost basis if -B ts1 = jtxns $ journalSelectingAmountFromOpts opts j -- apply any cur:SYM filters in reportq' symq = filterQuery queryIsSym reportq' ts2 = (if queryIsNull symq then id else map (filterTransactionAmounts symq)) ts1 -- keep just the transactions affecting this account (via possibly realness or status-filtered postings) realq = filterQuery queryIsReal reportq' statusq = filterQuery queryIsStatus reportq' ts3 = filter (matchesTransaction thisacctq . filterTransactionPostings (And [realq, statusq])) ts2 -- sort by the transaction's register date, for accurate starting balance ts = sortBy (comparing (transactionRegisterDate reportq' thisacctq)) ts3 (startbal,label) | balancetype_ opts == HistoricalBalance = (sumPostings priorps, balancelabel) | otherwise = (nullmixedamt, totallabel) where priorps = dbg1 "priorps" $ filter (matchesPosting (dbg1 "priorq" $ And [thisacctq, tostartdateq, datelessreportq])) $ transactionsPostings ts tostartdateq = case mstartdate of Just _ -> Date (DateSpan Nothing mstartdate) Nothing -> None -- no start date specified, there are no prior postings mstartdate = queryStartDate (date2_ opts) reportq' datelessreportq = filterQuery (not . queryIsDateOrDate2) reportq' items = reverse $ -- see also registerChartHtml accountTransactionsReportItems reportq' thisacctq startbal negate ts totallabel = "Period Total" balancelabel = "Historical Total" -- | Generate transactions report items from a list of transactions, -- using the provided user-specified report query, a query specifying -- which account to use as the focus, a starting balance, a sign-setting -- function and a balance-summing function. Or with a None current account -- query, this can also be used for the journalTransactionsReport. 
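-- For example (illustrative): in an account register for assets:checking
-- whose report starts 2008/06/01, the default "Period Total" column
-- starts from zero at the report start, whereas with -H/--historical the
-- "Historical Total" column starts from the sum of the query-matched
-- postings to that account before 2008/06/01, so (provided the report
-- query doesn't exclude any of them) the figures match the account's
-- actual running balance.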
accountTransactionsReportItems :: Query -> Query -> MixedAmount -> (MixedAmount -> MixedAmount) -> [Transaction] -> [TransactionsReportItem] accountTransactionsReportItems _ _ _ _ [] = [] accountTransactionsReportItems reportq thisacctq bal signfn (torig:ts) = case i of Just i' -> i':is Nothing -> is -- 201403: This is used for both accountTransactionsReport and journalTransactionsReport, which makes it a bit overcomplicated -- 201407: I've lost my grip on this, let's just hope for the best -- 201606: we now calculate change and balance from filtered postings, check this still works well for all callers XXX where tfiltered@Transaction{tpostings=reportps} = filterTransactionPostings reportq torig tacct = tfiltered{tdate=transactionRegisterDate reportq thisacctq tfiltered} (i,bal') = case reportps of [] -> (Nothing,bal) -- no matched postings in this transaction, skip it _ -> (Just (torig, tacct, numotheraccts > 1, otheracctstr, a, b), b) where (thisacctps, otheracctps) = partition (matchesPosting thisacctq) reportps numotheraccts = length $ nub $ map paccount otheracctps otheracctstr | thisacctq == None = summarisePostingAccounts reportps -- no current account ? summarise all matched postings | numotheraccts == 0 = summarisePostingAccounts thisacctps -- only postings to current account ? summarise those | otherwise = summarisePostingAccounts otheracctps -- summarise matched postings to other account(s) a = signfn $ negate $ sum $ map pamount thisacctps b = bal + a is = accountTransactionsReportItems reportq thisacctq bal' signfn ts -- | What is the transaction's date in the context of a particular account -- (specified with a query) and report query, as in an account register ? -- It's normally the transaction's general date, but if any posting(s) -- matched by the report query and affecting the matched account(s) have -- their own earlier dates, it's the earliest of these dates. -- Secondary transaction/posting dates are ignored. transactionRegisterDate :: Query -> Query -> Transaction -> Day transactionRegisterDate reportq thisacctq t | null thisacctps = tdate t | otherwise = minimum $ map postingDate thisacctps where reportps = tpostings $ filterTransactionPostings reportq t thisacctps = filter (matchesPosting thisacctq) reportps -- -- | Generate a short readable summary of some postings, like -- -- "from (negatives) to (positives)". -- summarisePostings :: [Posting] -> String -- summarisePostings ps = -- case (summarisePostingAccounts froms, summarisePostingAccounts tos) of -- ("",t) -> "to "++t -- (f,"") -> "from "++f -- (f,t) -> "from "++f++" to "++t -- where -- (froms,tos) = partition (fromMaybe False . isNegativeMixedAmount . pamount) ps -- | Generate a simplified summary of some postings' accounts. -- To reduce noise, if there are both real and virtual postings, show only the real ones. summarisePostingAccounts :: [Posting] -> String summarisePostingAccounts ps = (intercalate ", " . map (T.unpack . accountSummarisedName) . nub . map paccount) displayps -- XXX pack where realps = filter isReal ps displayps | null realps = ps | otherwise = realps ------------------------------------------------------------------------------- -- | Split a transactions report whose items may involve several commodities, -- into one or more single-commodity transactions reports. 
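-- For example (illustrative): a transaction dated 2008/06/15 containing
-- a posting to the current account which carries its own date 2008/06/01
-- gets a register date of 2008/06/01; if none of the matched postings to
-- the current account have their own dates, the transaction's own date
-- (2008/06/15) is used.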
transactionsReportByCommodity :: TransactionsReport -> [(CommoditySymbol, TransactionsReport)] transactionsReportByCommodity tr = [(c, filterTransactionsReportByCommodity c tr) | c <- transactionsReportCommodities tr] where transactionsReportCommodities (_,items) = nub $ sort $ map acommodity $ concatMap (amounts . triAmount) items -- Remove transaction report items and item amount (and running -- balance amount) components that don't involve the specified -- commodity. Other item fields such as the transaction are left unchanged. filterTransactionsReportByCommodity :: CommoditySymbol -> TransactionsReport -> TransactionsReport filterTransactionsReportByCommodity c (label,items) = (label, fixTransactionsReportItemBalances $ concat [filterTransactionsReportItemByCommodity c i | i <- items]) where filterTransactionsReportItemByCommodity c (t,t2,s,o,a,bal) | c `elem` cs = [item'] | otherwise = [] where cs = map acommodity $ amounts a item' = (t,t2,s,o,a',bal) a' = filterMixedAmountByCommodity c a fixTransactionsReportItemBalances [] = [] fixTransactionsReportItemBalances [i] = [i] fixTransactionsReportItemBalances items = reverse $ i:(go startbal is) where i:is = reverse items startbal = filterMixedAmountByCommodity c $ triBalance i go _ [] = [] go bal ((t,t2,s,o,amt,_):is) = (t,t2,s,o,amt,bal'):go bal' is where bal' = bal + amt ------------------------------------------------------------------------------- hledger-lib-1.2/Hledger/Utils.hs0000644000000000000000000001515213066746043014743 0ustar0000000000000000{-| Standard imports and utilities which are useful everywhere, or needed low in the module hierarchy. This is the bottom of hledger's module graph. -} module Hledger.Utils (---- provide these frequently used modules - or not, for clearer api: -- module Control.Monad, -- module Data.List, -- module Data.Maybe, -- module Data.Time.Calendar, -- module Data.Time.Clock, -- module Data.Time.LocalTime, -- module Data.Tree, -- module Text.RegexPR, -- module Test.HUnit, -- module Text.Printf, ---- all of this one: module Hledger.Utils, module Hledger.Utils.Debug, module Hledger.Utils.Parse, module Hledger.Utils.Regex, module Hledger.Utils.String, module Hledger.Utils.Text, module Hledger.Utils.Test, module Hledger.Utils.Tree, -- Debug.Trace.trace, -- module Data.PPrint, -- module Hledger.Utils.UTF8IOCompat SystemString,fromSystemString,toSystemString,error',userError',usageError, -- the rest need to be done in each module I think ) where import Control.Monad (liftM) -- import Data.Char import Data.List -- import Data.Maybe -- import Data.PPrint import Data.Text (Text) import qualified Data.Text.IO as T import Data.Time.Clock import Data.Time.LocalTime -- import Data.Text (Text) -- import qualified Data.Text as T import System.Directory (getHomeDirectory) import System.FilePath((), isRelative) import System.IO -- import Text.Printf -- import qualified Data.Map as Map import Hledger.Utils.Debug import Hledger.Utils.Parse import Hledger.Utils.Regex import Hledger.Utils.String import Hledger.Utils.Text import Hledger.Utils.Test import Hledger.Utils.Tree -- import Prelude hiding (readFile,writeFile,appendFile,getContents,putStr,putStrLn) -- import Hledger.Utils.UTF8IOCompat (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import Hledger.Utils.UTF8IOCompat (SystemString,fromSystemString,toSystemString,error',userError',usageError) -- tuples first3 (x,_,_) = x second3 (_,x,_) = x third3 (_,_,x) = x first4 (x,_,_,_) = x second4 (_,x,_,_) = x third4 (_,_,x,_) = x fourth4 (_,_,_,x) = x first5 
(x,_,_,_,_) = x second5 (_,x,_,_,_) = x third5 (_,_,x,_,_) = x fourth5 (_,_,_,x,_) = x fifth5 (_,_,_,_,x) = x first6 (x,_,_,_,_,_) = x second6 (_,x,_,_,_,_) = x third6 (_,_,x,_,_,_) = x fourth6 (_,_,_,x,_,_) = x fifth6 (_,_,_,_,x,_) = x sixth6 (_,_,_,_,_,x) = x -- lists splitAtElement :: Eq a => a -> [a] -> [[a]] splitAtElement x l = case l of [] -> [] e:es | e==x -> split es es -> split es where split es = let (first,rest) = break (x==) es in first : splitAtElement x rest -- text -- time getCurrentLocalTime :: IO LocalTime getCurrentLocalTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToLocalTime tz t getCurrentZonedTime :: IO ZonedTime getCurrentZonedTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToZonedTime tz t -- misc isLeft :: Either a b -> Bool isLeft (Left _) = True isLeft _ = False isRight :: Either a b -> Bool isRight = not . isLeft -- | Apply a function the specified number of times. Possibly uses O(n) stack ? applyN :: Int -> (a -> a) -> a -> a applyN n f = (!! n) . iterate f -- from protolude, compare -- applyN :: Int -> (a -> a) -> a -> a -- applyN n f = X.foldr (.) identity (X.replicate n f) -- | Convert a possibly relative, possibly tilde-containing file path to an absolute one, -- given the current directory. ~username is not supported. Leave "-" unchanged. -- Can raise an error. expandPath :: FilePath -> FilePath -> IO FilePath -- general type sig for use in reader parsers expandPath _ "-" = return "-" expandPath curdir p = (if isRelative p then (curdir ) else id) `liftM` expandPath' p where expandPath' ('~':'/':p) = ( p) <$> getHomeDirectory expandPath' ('~':'\\':p) = ( p) <$> getHomeDirectory expandPath' ('~':_) = ioError $ userError "~USERNAME in paths is not supported" expandPath' p = return p firstJust ms = case dropWhile (==Nothing) ms of [] -> Nothing (md:_) -> md -- | Read a file in universal newline mode, handling any of the usual line ending conventions. readFile' :: FilePath -> IO Text readFile' name = do h <- openFile name ReadMode hSetNewlineMode h universalNewlineMode T.hGetContents h -- | Read a file in universal newline mode, handling any of the usual line ending conventions. readFileAnyLineEnding :: FilePath -> IO Text readFileAnyLineEnding path = do h <- openFile path ReadMode hSetNewlineMode h universalNewlineMode T.hGetContents h -- | Read the given file, or standard input if the path is "-", using -- universal newline mode. readFileOrStdinAnyLineEnding :: String -> IO Text readFileOrStdinAnyLineEnding f = do h <- fileHandle f hSetNewlineMode h universalNewlineMode T.hGetContents h where fileHandle "-" = return stdin fileHandle f = openFile f ReadMode -- | Total version of maximum, for integral types, giving 0 for an empty list. maximum' :: Integral a => [a] -> a maximum' [] = 0 maximum' xs = maximumStrict xs -- | Strict version of sum that doesn’t leak space {-# INLINABLE sumStrict #-} sumStrict :: Num a => [a] -> a sumStrict = foldl' (+) 0 -- | Strict version of maximum that doesn’t leak space {-# INLINABLE maximumStrict #-} maximumStrict :: Ord a => [a] -> a maximumStrict = foldl1' max -- | Strict version of minimum that doesn’t leak space {-# INLINABLE minimumStrict #-} minimumStrict :: Ord a => [a] -> a minimumStrict = foldl1' min -- | This is a version of sequence based on difference lists. It is -- slightly faster but we mostly use it because it uses the heap -- instead of the stack. 
This has the advantage that Neil Mitchell’s -- trick of limiting the stack size to discover space leaks doesn’t -- show this as a false positive. {-# INLINABLE sequence' #-} sequence' :: Monad f => [f a] -> f [a] sequence' ms = do h <- go id ms return (h []) where go h [] = return h go h (m:ms) = do x <- m go (h . (x :)) ms {-# INLINABLE mapM' #-} mapM' :: Monad f => (a -> f b) -> [a] -> f [b] mapM' f = sequence' . map f hledger-lib-1.2/Hledger/Utils/Debug.hs0000644000000000000000000002134613035510426015761 0ustar0000000000000000{-# LANGUAGE CPP, FlexibleContexts, TypeFamilies #-} -- | Debugging helpers -- more: -- http://hackage.haskell.org/packages/archive/TraceUtils/0.1.0.2/doc/html/Debug-TraceUtils.html -- http://hackage.haskell.org/packages/archive/trace-call/0.1/doc/html/Debug-TraceCall.html -- http://hackage.haskell.org/packages/archive/htrace/0.1/doc/html/Debug-HTrace.html -- http://hackage.haskell.org/packages/archive/traced/2009.7.20/doc/html/Debug-Traced.html module Hledger.Utils.Debug ( module Hledger.Utils.Debug ,module Debug.Trace #if __GLASGOW_HASKELL__ >= 704 ,ppShow #endif ) where import Control.Monad (when) import Control.Monad.IO.Class import Data.List hiding (uncons) import qualified Data.Text as T import Debug.Trace import Hledger.Utils.Parse import Safe (readDef) import System.Environment (getArgs) import System.Exit import System.IO.Unsafe (unsafePerformIO) import Text.Megaparsec import Text.Printf #if __GLASGOW_HASKELL__ >= 704 import Text.Show.Pretty (ppShow) #else -- the required pretty-show version requires GHC >= 7.4 ppShow :: Show a => a -> String ppShow = show #endif pprint :: Show a => a -> IO () pprint = putStrLn . ppShow -- | Trace (print to stderr) a showable value using a custom show function. traceWith :: (a -> String) -> a -> a traceWith f a = trace (f a) a -- | Parsec trace - show the current parsec position and next input, -- and the provided label if it's non-null. ptrace :: String -> TextParser m () ptrace msg = do pos <- getPosition next <- (T.take peeklength) `fmap` getInput let (l,c) = (sourceLine pos, sourceColumn pos) s = printf "at line %2d col %2d: %s" (unPos l) (unPos c) (show next) :: String s' = printf ("%-"++show (peeklength+30)++"s") s ++ " " ++ msg trace s' $ return () where peeklength = 30 -- | Global debug level, which controls the verbosity of debug output -- on the console. The default is 0 meaning no debug output. The -- @--debug@ command line flag sets it to 1, or @--debug=N@ sets it to -- a higher value (note: not @--debug N@ for some reason). This uses -- unsafePerformIO and can be accessed from anywhere and before normal -- command-line processing. When running with :main in GHCI, you must -- touch and reload this module to see the effect of a new --debug option. -- After command-line processing, it is also available as the @debug_@ -- field of 'Hledger.Cli.CliOptions.CliOpts'. -- {-# OPTIONS_GHC -fno-cse #-} -- {-# NOINLINE debugLevel #-} debugLevel :: Int debugLevel = case snd $ break (=="--debug") args of "--debug":[] -> 1 "--debug":n:_ -> readDef 1 n _ -> case take 1 $ filter ("--debug" `isPrefixOf`) args of ['-':'-':'d':'e':'b':'u':'g':'=':v] -> readDef 1 v _ -> 0 where args = unsafePerformIO getArgs -- | Convenience aliases for tracePrettyAt. -- Always pretty-print a message and the showable value to the console, then return it. -- ("dbg" without the 0 clashes with megaparsec 5.1). 
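-- For example, wrapping a value in pure code like
--   let j' = dbg1 "filtered journal" j
-- pretty-prints it to the console (via trace), labelled "filtered journal",
-- but only when the program was started with --debug or --debug=N for
-- N >= 1; otherwise the value is returned unchanged. From IO code,
-- dbg1IO "filtered journal" j does the same, returning ().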
dbg0 :: Show a => String -> a -> a dbg0 = tracePrettyAt 0 -- | Pretty-print a message and the showable value to the console when the debug level is >= 1, then return it. Uses unsafePerformIO. dbg1 :: Show a => String -> a -> a dbg1 = tracePrettyAt 1 dbg2 :: Show a => String -> a -> a dbg2 = tracePrettyAt 2 dbg3 :: Show a => String -> a -> a dbg3 = tracePrettyAt 3 dbg4 :: Show a => String -> a -> a dbg4 = tracePrettyAt 4 dbg5 :: Show a => String -> a -> a dbg5 = tracePrettyAt 5 dbg6 :: Show a => String -> a -> a dbg6 = tracePrettyAt 6 dbg7 :: Show a => String -> a -> a dbg7 = tracePrettyAt 7 dbg8 :: Show a => String -> a -> a dbg8 = tracePrettyAt 8 dbg9 :: Show a => String -> a -> a dbg9 = tracePrettyAt 9 -- | Convenience aliases for tracePrettyAtIO. -- Like dbg, but convenient to insert in an IO monad. -- XXX These have a bug; they should use traceIO, not trace, -- otherwise GHC can occasionally over-optimise -- (cf lpaste a few days ago where it killed/blocked a child thread). dbg0IO :: (MonadIO m, Show a) => String -> a -> m () dbg0IO = tracePrettyAtIO 0 dbg1IO :: (MonadIO m, Show a) => String -> a -> m () dbg1IO = tracePrettyAtIO 1 dbg2IO :: (MonadIO m, Show a) => String -> a -> m () dbg2IO = tracePrettyAtIO 2 dbg3IO :: (MonadIO m, Show a) => String -> a -> m () dbg3IO = tracePrettyAtIO 3 dbg4IO :: (MonadIO m, Show a) => String -> a -> m () dbg4IO = tracePrettyAtIO 4 dbg5IO :: (MonadIO m, Show a) => String -> a -> m () dbg5IO = tracePrettyAtIO 5 dbg6IO :: (MonadIO m, Show a) => String -> a -> m () dbg6IO = tracePrettyAtIO 6 dbg7IO :: (MonadIO m, Show a) => String -> a -> m () dbg7IO = tracePrettyAtIO 7 dbg8IO :: (MonadIO m, Show a) => String -> a -> m () dbg8IO = tracePrettyAtIO 8 dbg9IO :: (MonadIO m, Show a) => String -> a -> m () dbg9IO = tracePrettyAtIO 9 -- | Pretty-print a message and a showable value to the console if the debug level is at or above the specified level. -- dbtAt 0 always prints. Otherwise, uses unsafePerformIO. tracePrettyAt :: Show a => Int -> String -> a -> a tracePrettyAt lvl = dbgppshow lvl -- tracePrettyAtM :: (Monad m, Show a) => Int -> String -> a -> m a -- tracePrettyAtM lvl lbl x = tracePrettyAt lvl lbl x `seq` return x -- XXX Could not deduce (a ~ ()) -- from the context (Show a) -- bound by the type signature for -- dbgM :: Show a => String -> a -> IO () -- at hledger/Hledger/Cli/Main.hs:200:13-42 -- ‘a’ is a rigid type variable bound by -- the type signature for dbgM :: Show a => String -> a -> IO () -- at hledger/Hledger/Cli/Main.hs:200:13 -- Expected type: String -> a -> IO () -- Actual type: String -> a -> IO a tracePrettyAtIO :: (MonadIO m, Show a) => Int -> String -> a -> m () tracePrettyAtIO lvl lbl x = liftIO $ tracePrettyAt lvl lbl x `seq` return () -- | print this string to the console before evaluating the expression, -- if the global debug level is at or above the specified level. Uses unsafePerformIO. -- dbgtrace :: Int -> String -> a -> a -- dbgtrace level -- | debugLevel >= level = trace -- | otherwise = flip const -- | Print a showable value to the console, with a message, if the -- debug level is at or above the specified level (uses -- unsafePerformIO). -- Values are displayed with show, all on one line, which is hard to read. -- dbgshow :: Show a => Int -> String -> a -> a -- dbgshow level -- | debugLevel >= level = ltrace -- | otherwise = flip const -- | Print a showable value to the console, with a message, if the -- debug level is at or above the specified level (uses -- unsafePerformIO). 
-- Values are displayed with ppShow, each field/constructor on its own line. dbgppshow :: Show a => Int -> String -> a -> a dbgppshow level | level > 0 && debugLevel < level = flip const | otherwise = \s a -> let p = ppShow a ls = lines p nlorspace | length ls > 1 = "\n" | otherwise = " " ++ take (10 - length s) (repeat ' ') ls' | length ls > 1 = map (" "++) ls | otherwise = ls in trace (s++":"++nlorspace++intercalate "\n" ls') a -- -- | Print a showable value to the console, with a message, if the -- -- debug level is at or above the specified level (uses -- -- unsafePerformIO). -- -- Values are displayed with pprint. Field names are not shown, but the -- -- output is compact with smart line wrapping, long data elided, -- -- and slow calculations timed out. -- dbgpprint :: Data a => Int -> String -> a -> a -- dbgpprint level msg a -- | debugLevel >= level = unsafePerformIO $ do -- pprint a >>= putStrLn . ((msg++": \n") ++) . show -- return a -- | otherwise = a -- | Like dbg, then exit the program. Uses unsafePerformIO. dbgExit :: Show a => String -> a -> a dbgExit msg = const (unsafePerformIO exitFailure) . dbg0 msg -- | Print a message and parsec debug info (parse position and next -- input) to the console when the debug level is at or above -- this level. Uses unsafePerformIO. -- pdbgAt :: GenParser m => Float -> String -> m () pdbg :: Int -> String -> TextParser m () pdbg level msg = when (level <= debugLevel) $ ptrace msg -- | Like dbg, but writes the output to "debug.log" in the current directory. -- Uses unsafePerformIO. Can fail due to log file contention if called too quickly -- ("*** Exception: debug.log: openFile: resource busy (file is locked)"). dbglog :: Show a => String -> a -> a dbglog label a = (unsafePerformIO $ appendFile "debug.log" $ label ++ ": " ++ ppShow a ++ "\n") `seq` a hledger-lib-1.2/Hledger/Utils/Parse.hs0000644000000000000000000000511713035510426016003 0ustar0000000000000000{-# LANGUAGE FlexibleContexts, TypeFamilies #-} module Hledger.Utils.Parse where import Control.Monad.Except import Data.Char import Data.List import Data.Text (Text) import Text.Megaparsec hiding (State) import Data.Functor.Identity (Identity(..)) import Text.Printf import Control.Monad.State.Strict (StateT, evalStateT) import Hledger.Data.Types import Hledger.Utils.UTF8IOCompat (error') -- | A parser of strict text with generic user state, monad and return type. type TextParser m a = ParsecT Dec Text m a type JournalStateParser m a = StateT Journal (ParsecT Dec Text m) a type JournalParser a = StateT Journal (ParsecT Dec Text Identity) a -- | A journal parser that runs in IO and can throw an error mid-parse. type ErroringJournalParser m a = StateT Journal (ParsecT Dec Text (ExceptT String m)) a -- | Backtracking choice, use this when alternatives share a prefix. -- Consumes no input if all choices fail. choice' :: [TextParser m a] -> TextParser m a choice' = choice . map Text.Megaparsec.try -- | Backtracking choice, use this when alternatives share a prefix. -- Consumes no input if all choices fail. choiceInState :: [StateT s (ParsecT Dec Text m) a] -> StateT s (ParsecT Dec Text m) a choiceInState = choice . 
map Text.Megaparsec.try parsewith :: Parsec e Text a -> Text -> Either (ParseError Char e) a parsewith p = runParser p "" parsewithString :: Parsec e String a -> String -> Either (ParseError Char e) a parsewithString p = runParser p "" parseWithState :: Monad m => st -> StateT st (ParsecT Dec Text m) a -> Text -> m (Either (ParseError Char Dec) a) parseWithState ctx p s = runParserT (evalStateT p ctx) "" s parseWithState' :: (Stream s, ErrorComponent e) => st -> StateT st (ParsecT e s Identity) a -> s -> (Either (ParseError (Token s) e) a) parseWithState' ctx p s = runParser (evalStateT p ctx) "" s fromparse :: (Show t, Show e) => Either (ParseError t e) a -> a fromparse = either parseerror id parseerror :: (Show t, Show e) => ParseError t e -> a parseerror e = error' $ showParseError e showParseError :: (Show t, Show e) => ParseError t e -> String showParseError e = "parse error at " ++ show e showDateParseError :: (Show t, Show e) => ParseError t e -> String showDateParseError e = printf "date parse error (%s)" (intercalate ", " $ tail $ lines $ show e) nonspace :: TextParser m Char nonspace = satisfy (not . isSpace) spacenonewline :: (Stream s, Char ~ Token s) => ParsecT Dec s m Char spacenonewline = satisfy (`elem` " \v\f\t") restofline :: TextParser m String restofline = anyChar `manyTill` newline eolof :: TextParser m () eolof = (newline >> return ()) <|> eof hledger-lib-1.2/Hledger/Utils/Regex.hs0000644000000000000000000001142413035210046015774 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {-| Easy regular expression helpers, currently based on regex-tdfa. These should: - be cross-platform, not requiring C libraries - support unicode - support extended regular expressions - support replacement, with backreferences etc. - support splitting - have mnemonic names - have simple monomorphic types - work with simple strings Regex strings are automatically compiled into regular expressions the first time they are seen, and these are cached. If you use a huge number of unique regular expressions this might lead to increased memory usage. Several functions have memoised variants (*Memo), which also trade space for time. Current limitations: - (?i) and similar are not supported -} module Hledger.Utils.Regex ( -- * type aliases Regexp ,Replacement -- * standard regex operations ,regexMatches ,regexMatchesCI ,regexReplace ,regexReplaceCI ,regexReplaceMemo ,regexReplaceCIMemo ,regexReplaceBy ,regexReplaceByCI ) where import Data.Array import Data.Char import Data.List (foldl') import Data.MemoUgly (memo) import Text.Regex.TDFA ( Regex, CompOption(..), ExecOption(..), defaultCompOpt, defaultExecOpt, makeRegexOpts, AllMatches(getAllMatches), match, (=~), MatchText ) -- import Hledger.Utils.Debug import Hledger.Utils.UTF8IOCompat (error') -- | Regular expression. Extended regular expression-ish syntax ? But does not support eg (?i) syntax. type Regexp = String -- | A replacement pattern. May include numeric backreferences (\N). type Replacement = String -- | Convert our string-based regexps to real ones. Can fail if the -- string regexp is malformed. 
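-- A rough usage sketch of the parsing helpers above (illustrative; the
-- first example assumes OverloadedStrings for the Text literal):
--
--   parsewith restofline "first line\nsecond line"
--     == Right "first line"
--   parsewithString (many (satisfy isDigit)) "2017"
--     == (Right "2017" :: Either (ParseError Char Dec) String)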
toRegex :: Regexp -> Regex toRegex = memo (makeRegexOpts compOpt execOpt) toRegexCI :: Regexp -> Regex toRegexCI = memo (makeRegexOpts compOpt{caseSensitive=False} execOpt) compOpt :: CompOption compOpt = defaultCompOpt execOpt :: ExecOption execOpt = defaultExecOpt -- regexMatch' :: RegexContext Regexp String a => Regexp -> String -> a -- regexMatch' r s = s =~ (toRegex r) regexMatches :: Regexp -> String -> Bool regexMatches = flip (=~) regexMatchesCI :: Regexp -> String -> Bool regexMatchesCI r = match (toRegexCI r) -- | Replace all occurrences of the regexp, transforming each match with the given function. regexReplaceBy :: Regexp -> (String -> String) -> String -> String regexReplaceBy r = replaceAllBy (toRegex r) regexReplaceByCI :: Regexp -> (String -> String) -> String -> String regexReplaceByCI r = replaceAllBy (toRegexCI r) -- | Replace all occurrences of the regexp with the replacement -- pattern. The replacement pattern supports numeric backreferences -- (\N) but no other RE syntax. regexReplace :: Regexp -> Replacement -> String -> String regexReplace re = replaceRegex (toRegex re) regexReplaceCI :: Regexp -> Replacement -> String -> String regexReplaceCI re = replaceRegex (toRegexCI re) -- | A memoising version of regexReplace. Caches the result for each -- search pattern, replacement pattern, target string tuple. regexReplaceMemo :: Regexp -> Replacement -> String -> String regexReplaceMemo re repl = memo (regexReplace re repl) regexReplaceCIMemo :: Regexp -> Replacement -> String -> String regexReplaceCIMemo re repl = memo (regexReplaceCI re repl) -- replaceRegex :: Regex -> Replacement -> String -> String replaceRegex re repl s = foldl (replaceMatch repl) s (reverse $ match re s :: [MatchText String]) replaceMatch :: Replacement -> String -> MatchText String -> String replaceMatch replpat s matchgroups = pre ++ repl ++ post where ((_,(off,len)):_) = elems matchgroups -- groups should have 0-based indexes, and there should always be at least one, since this is a match (pre, post') = splitAt off s post = drop len post' repl = replaceAllBy (toRegex "\\\\[0-9]+") (replaceBackReference matchgroups) replpat replaceBackReference :: MatchText String -> String -> String replaceBackReference grps ('\\':s@(_:_)) | all isDigit s = case read s of n | n `elem` indices grps -> fst (grps ! n) _ -> error' $ "no match group exists for backreference \"\\"++s++"\"" replaceBackReference _ s = error' $ "replaceBackReference called on non-numeric-backreference \""++s++"\", shouldn't happen" -- -- http://stackoverflow.com/questions/9071682/replacement-substition-with-haskell-regex-libraries : -- | Replace all occurrences of a regexp in a string, transforming each match with the given function. replaceAllBy :: Regex -> (String -> String) -> String -> String replaceAllBy re f s = start end where (_, end, start) = foldl' go (0, s, id) $ (getAllMatches $ match re s :: [(Int, Int)]) go (ind,read,write) (off,len) = let (skip, start) = splitAt (off - ind) read (matched, remaining) = splitAt len start in (off + len, remaining, write . (skip++) . (f matched ++)) hledger-lib-1.2/Hledger/Utils/String.hs0000644000000000000000000003340013035510426016173 0ustar0000000000000000-- | String formatting helpers, starting to get a bit out of control. 
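-- A few illustrative uses of the regex helpers above (results as they
-- would print in ghci, not taken from the test suite):
--
--   regexMatchesCI "assets" "My ASSETS account"   -- True
--   regexReplace "(a+)" "<\\1>" "aaa b aa"        -- "<aaa> b <aa>"
--
-- The second shows a numeric backreference: each match of the first
-- group is substituted into the replacement pattern.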
module Hledger.Utils.String ( -- * misc lowercase, uppercase, underline, stripbrackets, unbracket, -- quoting quoteIfNeeded, singleQuoteIfNeeded, -- quotechars, -- whitespacechars, escapeQuotes, words', unwords', -- * single-line layout strip, lstrip, rstrip, chomp, elideLeft, elideRight, formatString, -- * multi-line layout concatTopPadded, concatBottomPadded, concatOneLine, vConcatLeftAligned, vConcatRightAligned, padtop, padbottom, padleft, padright, cliptopleft, fitto, -- * wide-character-aware layout charWidth, strWidth, takeWidth, fitString, fitStringMulti, padLeftWide, padRightWide ) where import Data.Char import Data.List import Text.Megaparsec import Text.Printf (printf) import Hledger.Utils.Parse import Hledger.Utils.Regex lowercase, uppercase :: String -> String lowercase = map toLower uppercase = map toUpper -- | Remove leading and trailing whitespace. strip :: String -> String strip = lstrip . rstrip -- | Remove leading whitespace. lstrip :: String -> String lstrip = dropWhile isSpace -- | Remove trailing whitespace. rstrip :: String -> String rstrip = reverse . lstrip . reverse -- | Remove trailing newlines/carriage returns. chomp :: String -> String chomp = reverse . dropWhile (`elem` "\r\n") . reverse stripbrackets :: String -> String stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String elideLeft :: Int -> String -> String elideLeft width s = if length s > width then ".." ++ reverse (take (width - 2) $ reverse s) else s elideRight :: Int -> String -> String elideRight width s = if length s > width then take (width - 2) s ++ ".." else s -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- Works on multi-line strings too (but will rewrite non-unix line endings). formatString :: Bool -> Maybe Int -> Maybe Int -> String -> String formatString leftJustified minwidth maxwidth s = intercalate "\n" $ map (printf fmt) $ lines s where justify = if leftJustified then "-" else "" minwidth' = maybe "" show minwidth maxwidth' = maybe "" (("."++).show) maxwidth fmt = "%" ++ justify ++ minwidth' ++ maxwidth' ++ "s" underline :: String -> String underline s = s' ++ replicate (length s) '-' ++ "\n" where s' | last s == '\n' = s | otherwise = s ++ "\n" -- | Double-quote this string if it contains whitespace, single quotes -- or double-quotes, escaping the quotes as needed. quoteIfNeeded :: String -> String quoteIfNeeded s | any (`elem` s) (quotechars++whitespacechars) = "\"" ++ escapeDoubleQuotes s ++ "\"" | otherwise = s -- | Single-quote this string if it contains whitespace or double-quotes. -- No good for strings containing single quotes. singleQuoteIfNeeded :: String -> String singleQuoteIfNeeded s | any (`elem` s) whitespacechars = "'"++s++"'" | otherwise = s quotechars, whitespacechars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" escapeDoubleQuotes :: String -> String escapeDoubleQuotes = regexReplace "\"" "\"" escapeQuotes :: String -> String escapeQuotes = regexReplace "([\"'])" "\\1" -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. 
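-- For example (illustrative only; results assumed):
--
--   words' "a 'b c' d"    ~>  ["a", "b c", "d"]
--   words' "\"x y\" z"    ~>  ["x y", "z"]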
words' :: String -> [String] words' "" = [] words' s = map stripquotes $ fromparse $ parsewithString p s where p = do ss <- (singleQuotedPattern <|> doubleQuotedPattern <|> pattern) `sepBy` some spacenonewline -- eof return ss pattern = many (noneOf whitespacechars) singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- | Quote-aware version of unwords - single-quote strings which contain whitespace unwords' :: [String] -> String unwords' = unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. stripquotes :: String -> String stripquotes s = if isSingleQuoted s || isDoubleQuoted s then init $ tail s else s isSingleQuoted s@(_:_:_) = head s == '\'' && last s == '\'' isSingleQuoted _ = False isDoubleQuoted s@(_:_:_) = head s == '"' && last s == '"' isDoubleQuoted _ = False unbracket :: String -> String unbracket s | (head s == '[' && last s == ']') || (head s == '(' && last s == ')') = init $ tail s | otherwise = s -- | Join several multi-line strings as side-by-side rectangular strings of the same height, top-padded. -- Treats wide characters as double width. concatTopPadded :: [String] -> String concatTopPadded strs = intercalate "\n" $ map concat $ transpose padded where lss = map lines strs h = maximum $ map length lss ypad ls = replicate (difforzero h (length ls)) "" ++ ls xpad ls = map (padLeftWide w) ls where w | null ls = 0 | otherwise = maximum $ map strWidth ls padded = map (xpad . ypad) lss -- | Join several multi-line strings as side-by-side rectangular strings of the same height, bottom-padded. -- Treats wide characters as double width. concatBottomPadded :: [String] -> String concatBottomPadded strs = intercalate "\n" $ map concat $ transpose padded where lss = map lines strs h = maximum $ map length lss ypad ls = ls ++ replicate (difforzero h (length ls)) "" xpad ls = map (padRightWide w) ls where w | null ls = 0 | otherwise = maximum $ map strWidth ls padded = map (xpad . ypad) lss -- | Join multi-line strings horizontally, after compressing each of -- them to a single line with a comma and space between each original line. concatOneLine :: [String] -> String concatOneLine strs = concat $ map ((intercalate ", ").lines) strs -- | Join strings vertically, left-aligned and right-padded. vConcatLeftAligned :: [String] -> String vConcatLeftAligned ss = intercalate "\n" $ map showfixedwidth ss where showfixedwidth = printf (printf "%%-%ds" width) width = maximum $ map length ss -- | Join strings vertically, right-aligned and left-padded. vConcatRightAligned :: [String] -> String vConcatRightAligned ss = intercalate "\n" $ map showfixedwidth ss where showfixedwidth = printf (printf "%%%ds" width) width = maximum $ map length ss -- | Convert a multi-line string to a rectangular string top-padded to the specified height. padtop :: Int -> String -> String padtop h s = intercalate "\n" xpadded where ls = lines s sh = length ls sw | null ls = 0 | otherwise = maximum $ map length ls ypadded = replicate (difforzero h sh) "" ++ ls xpadded = map (padleft sw) ypadded -- | Convert a multi-line string to a rectangular string bottom-padded to the specified height. 
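-- For example (illustrative, result assumed; each line is also left-padded
-- to the width of the widest line):
--
--   padbottom 3 "ab\nc"   ~>  "ab\n c\n  "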
padbottom :: Int -> String -> String padbottom h s = intercalate "\n" xpadded where ls = lines s sh = length ls sw | null ls = 0 | otherwise = maximum $ map length ls ypadded = ls ++ replicate (difforzero h sh) "" xpadded = map (padleft sw) ypadded difforzero :: (Num a, Ord a) => a -> a -> a difforzero a b = maximum [(a - b), 0] -- | Convert a multi-line string to a rectangular string left-padded to the specified width. -- Treats wide characters as double width. padleft :: Int -> String -> String padleft w "" = concat $ replicate w " " padleft w s = intercalate "\n" $ map (printf (printf "%%%ds" w)) $ lines s -- | Convert a multi-line string to a rectangular string right-padded to the specified width. -- Treats wide characters as double width. padright :: Int -> String -> String padright w "" = concat $ replicate w " " padright w s = intercalate "\n" $ map (printf (printf "%%-%ds" w)) $ lines s -- | Clip a multi-line string to the specified width and height from the top left. cliptopleft :: Int -> Int -> String -> String cliptopleft w h = intercalate "\n" . take h . map (take w) . lines -- | Clip and pad a multi-line string to fill the specified width and height. fitto :: Int -> Int -> String -> String fitto w h s = intercalate "\n" $ take h $ rows ++ repeat blankline where rows = map (fit w) $ lines s fit w = take w . (++ repeat ' ') blankline = replicate w ' ' -- Functions below treat wide (eg CJK) characters as double-width. -- | General-purpose wide-char-aware single-line string layout function. -- It can left- or right-pad a short string to a minimum width. -- It can left- or right-clip a long string to a maximum width, optionally inserting an ellipsis (the third argument). -- It clips and pads on the right when the fourth argument is true, otherwise on the left. -- It treats wide characters as double width. fitString :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String fitString mminwidth mmaxwidth ellipsify rightside s = (clip . pad) s where clip :: String -> String clip s = case mmaxwidth of Just w | strWidth s > w -> case rightside of True -> takeWidth (w - length ellipsis) s ++ ellipsis False -> ellipsis ++ reverse (takeWidth (w - length ellipsis) $ reverse s) | otherwise -> s where ellipsis = if ellipsify then ".." else "" Nothing -> s pad :: String -> String pad s = case mminwidth of Just w | sw < w -> case rightside of True -> s ++ replicate (w - sw) ' ' False -> replicate (w - sw) ' ' ++ s | otherwise -> s Nothing -> s where sw = strWidth s -- | A version of fitString that works on multi-line strings, -- separate for now to avoid breakage. -- This will rewrite any line endings to unix newlines. fitStringMulti :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String fitStringMulti mminwidth mmaxwidth ellipsify rightside s = (intercalate "\n" . map (fitString mminwidth mmaxwidth ellipsify rightside) . lines) s -- | Left-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). padLeftWide :: Int -> String -> String padLeftWide w "" = replicate w ' ' padLeftWide w s = intercalate "\n" $ map (fitString (Just w) Nothing False False) $ lines s -- XXX not yet replaceable by -- padLeftWide w = fitStringMulti (Just w) Nothing False False -- | Right-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). 
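-- For example (illustrative, results assumed):
--
--   padRightWide 5 "ab"    ~>  "ab   "
--   padRightWide 5 "りん"  ~>  "りん "   (the two wide characters occupy width 4)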
padRightWide :: Int -> String -> String padRightWide w "" = replicate w ' ' padRightWide w s = intercalate "\n" $ map (fitString (Just w) Nothing False True) $ lines s -- XXX not yet replaceable by -- padRightWide w = fitStringMulti (Just w) Nothing False True -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg takeWidth 3 "りんご" = "り". takeWidth :: Int -> String -> String takeWidth _ "" = "" takeWidth 0 _ = "" takeWidth w (c:cs) | cw <= w = c:takeWidth (w-cw) cs | otherwise = "" where cw = charWidth c -- from Pandoc (copyright John MacFarlane, GPL) -- see also http://unicode.org/reports/tr11/#Description -- | Calculate the designated render width of a string, taking into -- account wide characters and line breaks (the longest line within a -- multi-line string determines the width ). strWidth :: String -> Int strWidth "" = 0 strWidth s = maximum $ map (foldr (\a b -> charWidth a + b) 0) $ lines s -- | Get the designated render width of a character: 0 for a combining -- character, 1 for a regular character, 2 for a wide character. -- (Wide characters are rendered as exactly double width in apps and -- fonts that support it.) (From Pandoc.) charWidth :: Char -> Int charWidth c = case c of _ | c < '\x0300' -> 1 | c >= '\x0300' && c <= '\x036F' -> 0 -- combining | c >= '\x0370' && c <= '\x10FC' -> 1 | c >= '\x1100' && c <= '\x115F' -> 2 | c >= '\x1160' && c <= '\x11A2' -> 1 | c >= '\x11A3' && c <= '\x11A7' -> 2 | c >= '\x11A8' && c <= '\x11F9' -> 1 | c >= '\x11FA' && c <= '\x11FF' -> 2 | c >= '\x1200' && c <= '\x2328' -> 1 | c >= '\x2329' && c <= '\x232A' -> 2 | c >= '\x232B' && c <= '\x2E31' -> 1 | c >= '\x2E80' && c <= '\x303E' -> 2 | c == '\x303F' -> 1 | c >= '\x3041' && c <= '\x3247' -> 2 | c >= '\x3248' && c <= '\x324F' -> 1 -- ambiguous | c >= '\x3250' && c <= '\x4DBF' -> 2 | c >= '\x4DC0' && c <= '\x4DFF' -> 1 | c >= '\x4E00' && c <= '\xA4C6' -> 2 | c >= '\xA4D0' && c <= '\xA95F' -> 1 | c >= '\xA960' && c <= '\xA97C' -> 2 | c >= '\xA980' && c <= '\xABF9' -> 1 | c >= '\xAC00' && c <= '\xD7FB' -> 2 | c >= '\xD800' && c <= '\xDFFF' -> 1 | c >= '\xE000' && c <= '\xF8FF' -> 1 -- ambiguous | c >= '\xF900' && c <= '\xFAFF' -> 2 | c >= '\xFB00' && c <= '\xFDFD' -> 1 | c >= '\xFE00' && c <= '\xFE0F' -> 1 -- ambiguous | c >= '\xFE10' && c <= '\xFE19' -> 2 | c >= '\xFE20' && c <= '\xFE26' -> 1 | c >= '\xFE30' && c <= '\xFE6B' -> 2 | c >= '\xFE70' && c <= '\xFEFF' -> 1 | c >= '\xFF01' && c <= '\xFF60' -> 2 | c >= '\xFF61' && c <= '\x16A38' -> 1 | c >= '\x1B000' && c <= '\x1B001' -> 2 | c >= '\x1D000' && c <= '\x1F1FF' -> 1 | c >= '\x1F200' && c <= '\x1F251' -> 2 | c >= '\x1F300' && c <= '\x1F773' -> 1 | c >= '\x20000' && c <= '\x3FFFD' -> 2 | otherwise -> 1 hledger-lib-1.2/Hledger/Utils/Test.hs0000644000000000000000000000332113035210046015636 0ustar0000000000000000module Hledger.Utils.Test where import Test.HUnit import Text.Megaparsec -- | Get a Test's label, or the empty string. testName :: Test -> String testName (TestLabel n _) = n testName _ = "" -- | Flatten a Test containing TestLists into a list of single tests. flattenTests :: Test -> [Test] flattenTests (TestLabel _ t@(TestList _)) = flattenTests t flattenTests (TestList ts) = concatMap flattenTests ts flattenTests t = [t] -- | Filter TestLists in a Test, recursively, preserving the structure. 
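-- For example, an illustrative sketch ('someTests' is a hypothetical Test value):
--
--   filterTests ((== "amount") . testName) someTests
--
-- keeps only the branches that contain a test labelled "amount".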
filterTests :: (Test -> Bool) -> Test -> Test filterTests p (TestLabel l ts) = TestLabel l (filterTests p ts) filterTests p (TestList ts) = TestList $ filter (any p . flattenTests) $ map (filterTests p) ts filterTests _ t = t -- | Simple way to assert something is some expected value, with no label. is :: (Eq a, Show a) => a -> a -> Assertion a `is` e = assertEqual "" e a -- | Assert a parse result is successful, printing the parse error on failure. assertParse :: (Show t, Show e) => (Either (ParseError t e) a) -> Assertion assertParse parse = either (assertFailure.show) (const (return ())) parse -- | Assert a parse result is successful, printing the parse error on failure. assertParseFailure :: (Either (ParseError t e) a) -> Assertion assertParseFailure parse = either (const $ return ()) (const $ assertFailure "parse should not have succeeded") parse -- | Assert a parse result is some expected value, printing the parse error on failure. assertParseEqual :: (Show a, Eq a, Show t, Show e) => (Either (ParseError t e) a) -> a -> Assertion assertParseEqual parse expected = either (assertFailure.show) (`is` expected) parse printParseError :: (Show a) => a -> IO () printParseError e = do putStr "parse error at "; print e hledger-lib-1.2/Hledger/Utils/Text.hs0000644000000000000000000003754613035210046015663 0ustar0000000000000000-- | Text formatting helpers, ported from String as needed. -- There may be better alternatives out there. {-# LANGUAGE OverloadedStrings #-} module Hledger.Utils.Text -- ( -- -- * misc -- lowercase, -- uppercase, -- underline, -- stripbrackets, -- unbracket, -- -- quoting -- quoteIfSpaced, -- quoteIfNeeded, -- singleQuoteIfNeeded, -- -- quotechars, -- -- whitespacechars, -- escapeDoubleQuotes, -- escapeSingleQuotes, -- escapeQuotes, -- words', -- unwords', -- stripquotes, -- isSingleQuoted, -- isDoubleQuoted, -- -- * single-line layout -- strip, -- lstrip, -- rstrip, -- chomp, -- elideLeft, -- elideRight, -- formatString, -- -- * multi-line layout -- concatTopPadded, -- concatBottomPadded, -- concatOneLine, -- vConcatLeftAligned, -- vConcatRightAligned, -- padtop, -- padbottom, -- padleft, -- padright, -- cliptopleft, -- fitto, -- -- * wide-character-aware layout -- strWidth, -- textTakeWidth, -- fitString, -- fitStringMulti, -- padLeftWide, -- padRightWide -- ) where -- import Data.Char import Data.List import Data.Monoid import Data.Text (Text) import qualified Data.Text as T -- import Text.Parsec -- import Text.Printf (printf) -- import Hledger.Utils.Parse -- import Hledger.Utils.Regex import Hledger.Utils.String (charWidth) -- lowercase, uppercase :: String -> String -- lowercase = map toLower -- uppercase = map toUpper -- | Remove leading and trailing whitespace. textstrip :: Text -> Text textstrip = textlstrip . textrstrip -- | Remove leading whitespace. textlstrip :: Text -> Text textlstrip = T.dropWhile (`elem` (" \t" :: String)) :: Text -> Text -- XXX isSpace ? -- | Remove trailing whitespace. textrstrip = T.reverse . textlstrip . T.reverse textrstrip :: Text -> Text -- -- | Remove trailing newlines/carriage returns. -- chomp :: String -> String -- chomp = reverse . dropWhile (`elem` "\r\n") . reverse -- stripbrackets :: String -> String -- stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String -- elideLeft :: Int -> String -> String -- elideLeft width s = -- if length s > width then ".." 
++ reverse (take (width - 2) $ reverse s) else s textElideRight :: Int -> Text -> Text textElideRight width t = if T.length t > width then T.take (width - 2) t <> ".." else t -- -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- -- Works on multi-line strings too (but will rewrite non-unix line endings). -- formatString :: Bool -> Maybe Int -> Maybe Int -> String -> String -- formatString leftJustified minwidth maxwidth s = intercalate "\n" $ map (printf fmt) $ lines s -- where -- justify = if leftJustified then "-" else "" -- minwidth' = maybe "" show minwidth -- maxwidth' = maybe "" (("."++).show) maxwidth -- fmt = "%" ++ justify ++ minwidth' ++ maxwidth' ++ "s" -- underline :: String -> String -- underline s = s' ++ replicate (length s) '-' ++ "\n" -- where s' -- | last s == '\n' = s -- | otherwise = s ++ "\n" -- | Wrap a string in double quotes, and \-prefix any embedded single -- quotes, if it contains whitespace and is not already single- or -- double-quoted. quoteIfSpaced :: T.Text -> T.Text quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s | not $ any (`elem` (T.unpack s)) whitespacechars = s | otherwise = "'"<>escapeSingleQuotes s<>"'" -- -- | Wrap a string in double quotes, and \-prefix any embedded single -- -- quotes, if it contains whitespace and is not already single- or -- -- double-quoted. -- quoteIfSpaced :: String -> String -- quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s -- | not $ any (`elem` s) whitespacechars = s -- | otherwise = "'"++escapeSingleQuotes s++"'" -- -- | Double-quote this string if it contains whitespace, single quotes -- -- or double-quotes, escaping the quotes as needed. -- quoteIfNeeded :: T.Text -> T.Text -- quoteIfNeeded s | any (`elem` T.unpack s) (quotechars++whitespacechars) = "\"" <> escapeDoubleQuotes s <> "\"" -- | otherwise = s -- -- | Single-quote this string if it contains whitespace or double-quotes. -- -- No good for strings containing single quotes. -- singleQuoteIfNeeded :: String -> String -- singleQuoteIfNeeded s | any (`elem` s) whitespacechars = "'"++s++"'" -- | otherwise = s quotechars, whitespacechars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" escapeDoubleQuotes :: T.Text -> T.Text escapeDoubleQuotes = T.replace "\"" "\"" escapeSingleQuotes :: T.Text -> T.Text escapeSingleQuotes = T.replace "'" "\'" -- escapeQuotes :: String -> String -- escapeQuotes = regexReplace "([\"'])" "\\1" -- -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. -- words' :: String -> [String] -- words' "" = [] -- words' s = map stripquotes $ fromparse $ parsewith p s -- where -- p = do ss <- (singleQuotedPattern <|> doubleQuotedPattern <|> pattern) `sepBy` many1 spacenonewline -- -- eof -- return ss -- pattern = many (noneOf whitespacechars) -- singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") -- doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- -- | Quote-aware version of unwords - single-quote strings which contain whitespace -- unwords' :: [Text] -> Text -- unwords' = T.unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. 
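-- For example (illustrative, results assumed):
--
--   stripquotes "\"hello\""  ~>  "hello"
--   stripquotes "'a"         ~>  "'a"      (no matching pair, left unchanged)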
stripquotes :: Text -> Text stripquotes s = if isSingleQuoted s || isDoubleQuoted s then T.init $ T.tail s else s isSingleQuoted :: Text -> Bool isSingleQuoted s = T.length (T.take 2 s) == 2 && T.head s == '\'' && T.last s == '\'' isDoubleQuoted :: Text -> Bool isDoubleQuoted s = T.length (T.take 2 s) == 2 && T.head s == '"' && T.last s == '"' textUnbracket :: Text -> Text textUnbracket s | (T.head s == '[' && T.last s == ']') || (T.head s == '(' && T.last s == ')') = T.init $ T.tail s | otherwise = s -- | Join several multi-line strings as side-by-side rectangular strings of the same height, top-padded. -- Treats wide characters as double width. textConcatTopPadded :: [Text] -> Text textConcatTopPadded ts = T.intercalate "\n" $ map T.concat $ transpose padded where lss = map T.lines ts :: [[Text]] h = maximum $ map length lss ypad ls = replicate (difforzero h (length ls)) "" ++ ls xpad ls = map (textPadLeftWide w) ls where w | null ls = 0 | otherwise = maximum $ map textWidth ls padded = map (xpad . ypad) lss :: [[Text]] -- -- | Join several multi-line strings as side-by-side rectangular strings of the same height, bottom-padded. -- -- Treats wide characters as double width. -- concatBottomPadded :: [String] -> String -- concatBottomPadded strs = intercalate "\n" $ map concat $ transpose padded -- where -- lss = map lines strs -- h = maximum $ map length lss -- ypad ls = ls ++ replicate (difforzero h (length ls)) "" -- xpad ls = map (padRightWide w) ls where w | null ls = 0 -- | otherwise = maximum $ map strWidth ls -- padded = map (xpad . ypad) lss -- -- | Join multi-line strings horizontally, after compressing each of -- -- them to a single line with a comma and space between each original line. -- concatOneLine :: [String] -> String -- concatOneLine strs = concat $ map ((intercalate ", ").lines) strs -- -- | Join strings vertically, left-aligned and right-padded. -- vConcatLeftAligned :: [String] -> String -- vConcatLeftAligned ss = intercalate "\n" $ map showfixedwidth ss -- where -- showfixedwidth = printf (printf "%%-%ds" width) -- width = maximum $ map length ss -- -- | Join strings vertically, right-aligned and left-padded. -- vConcatRightAligned :: [String] -> String -- vConcatRightAligned ss = intercalate "\n" $ map showfixedwidth ss -- where -- showfixedwidth = printf (printf "%%%ds" width) -- width = maximum $ map length ss -- -- | Convert a multi-line string to a rectangular string top-padded to the specified height. -- padtop :: Int -> String -> String -- padtop h s = intercalate "\n" xpadded -- where -- ls = lines s -- sh = length ls -- sw | null ls = 0 -- | otherwise = maximum $ map length ls -- ypadded = replicate (difforzero h sh) "" ++ ls -- xpadded = map (padleft sw) ypadded -- -- | Convert a multi-line string to a rectangular string bottom-padded to the specified height. -- padbottom :: Int -> String -> String -- padbottom h s = intercalate "\n" xpadded -- where -- ls = lines s -- sh = length ls -- sw | null ls = 0 -- | otherwise = maximum $ map length ls -- ypadded = ls ++ replicate (difforzero h sh) "" -- xpadded = map (padleft sw) ypadded difforzero :: (Num a, Ord a) => a -> a -> a difforzero a b = maximum [(a - b), 0] -- -- | Convert a multi-line string to a rectangular string left-padded to the specified width. -- -- Treats wide characters as double width. 
-- padleft :: Int -> String -> String -- padleft w "" = concat $ replicate w " " -- padleft w s = intercalate "\n" $ map (printf (printf "%%%ds" w)) $ lines s -- -- | Convert a multi-line string to a rectangular string right-padded to the specified width. -- -- Treats wide characters as double width. -- padright :: Int -> String -> String -- padright w "" = concat $ replicate w " " -- padright w s = intercalate "\n" $ map (printf (printf "%%-%ds" w)) $ lines s -- -- | Clip a multi-line string to the specified width and height from the top left. -- cliptopleft :: Int -> Int -> String -> String -- cliptopleft w h = intercalate "\n" . take h . map (take w) . lines -- -- | Clip and pad a multi-line string to fill the specified width and height. -- fitto :: Int -> Int -> String -> String -- fitto w h s = intercalate "\n" $ take h $ rows ++ repeat blankline -- where -- rows = map (fit w) $ lines s -- fit w = take w . (++ repeat ' ') -- blankline = replicate w ' ' -- -- Functions below treat wide (eg CJK) characters as double-width. -- | General-purpose wide-char-aware single-line text layout function. -- It can left- or right-pad a short string to a minimum width. -- It can left- or right-clip a long string to a maximum width, optionally inserting an ellipsis (the third argument). -- It clips and pads on the right when the fourth argument is true, otherwise on the left. -- It treats wide characters as double width. fitText :: Maybe Int -> Maybe Int -> Bool -> Bool -> Text -> Text fitText mminwidth mmaxwidth ellipsify rightside s = (clip . pad) s where clip :: Text -> Text clip s = case mmaxwidth of Just w | textWidth s > w -> case rightside of True -> textTakeWidth (w - T.length ellipsis) s <> ellipsis False -> ellipsis <> T.reverse (textTakeWidth (w - T.length ellipsis) $ T.reverse s) | otherwise -> s where ellipsis = if ellipsify then ".." else "" Nothing -> s pad :: Text -> Text pad s = case mminwidth of Just w | sw < w -> case rightside of True -> s <> T.replicate (w - sw) " " False -> T.replicate (w - sw) " " <> s | otherwise -> s Nothing -> s where sw = textWidth s -- -- | A version of fitString that works on multi-line strings, -- -- separate for now to avoid breakage. -- -- This will rewrite any line endings to unix newlines. -- fitStringMulti :: Maybe Int -> Maybe Int -> Bool -> Bool -> String -> String -- fitStringMulti mminwidth mmaxwidth ellipsify rightside s = -- (intercalate "\n" . map (fitString mminwidth mmaxwidth ellipsify rightside) . lines) s -- | Left-pad a text to the specified width. -- Treats wide characters as double width. -- Works on multi-line texts too (but will rewrite non-unix line endings). textPadLeftWide :: Int -> Text -> Text textPadLeftWide w "" = T.replicate w " " textPadLeftWide w s = T.intercalate "\n" $ map (fitText (Just w) Nothing False False) $ T.lines s -- XXX not yet replaceable by -- padLeftWide w = fitStringMulti (Just w) Nothing False False -- | Right-pad a string to the specified width. -- Treats wide characters as double width. -- Works on multi-line strings too (but will rewrite non-unix line endings). textPadRightWide :: Int -> Text -> Text textPadRightWide w "" = T.replicate w " " textPadRightWide w s = T.intercalate "\n" $ map (fitText (Just w) Nothing False True) $ T.lines s -- XXX not yet replaceable by -- padRightWide w = fitStringMulti (Just w) Nothing False True -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg textTakeWidth 3 "りんご" = "り". 
textTakeWidth :: Int -> Text -> Text textTakeWidth _ "" = "" textTakeWidth 0 _ = "" textTakeWidth w t | not (T.null t), let c = T.head t, let cw = charWidth c, cw <= w = T.cons c $ textTakeWidth (w-cw) (T.tail t) | otherwise = "" -- -- from Pandoc (copyright John MacFarlane, GPL) -- -- see also http://unicode.org/reports/tr11/#Description -- | Calculate the designated render width of a string, taking into -- account wide characters and line breaks (the longest line within a -- multi-line string determines the width ). textWidth :: Text -> Int textWidth "" = 0 textWidth s = maximum $ map (T.foldr (\a b -> charWidth a + b) 0) $ T.lines s -- -- | Get the designated render width of a character: 0 for a combining -- -- character, 1 for a regular character, 2 for a wide character. -- -- (Wide characters are rendered as exactly double width in apps and -- -- fonts that support it.) (From Pandoc.) -- charWidth :: Char -> Int -- charWidth c = -- case c of -- _ | c < '\x0300' -> 1 -- | c >= '\x0300' && c <= '\x036F' -> 0 -- combining -- | c >= '\x0370' && c <= '\x10FC' -> 1 -- | c >= '\x1100' && c <= '\x115F' -> 2 -- | c >= '\x1160' && c <= '\x11A2' -> 1 -- | c >= '\x11A3' && c <= '\x11A7' -> 2 -- | c >= '\x11A8' && c <= '\x11F9' -> 1 -- | c >= '\x11FA' && c <= '\x11FF' -> 2 -- | c >= '\x1200' && c <= '\x2328' -> 1 -- | c >= '\x2329' && c <= '\x232A' -> 2 -- | c >= '\x232B' && c <= '\x2E31' -> 1 -- | c >= '\x2E80' && c <= '\x303E' -> 2 -- | c == '\x303F' -> 1 -- | c >= '\x3041' && c <= '\x3247' -> 2 -- | c >= '\x3248' && c <= '\x324F' -> 1 -- ambiguous -- | c >= '\x3250' && c <= '\x4DBF' -> 2 -- | c >= '\x4DC0' && c <= '\x4DFF' -> 1 -- | c >= '\x4E00' && c <= '\xA4C6' -> 2 -- | c >= '\xA4D0' && c <= '\xA95F' -> 1 -- | c >= '\xA960' && c <= '\xA97C' -> 2 -- | c >= '\xA980' && c <= '\xABF9' -> 1 -- | c >= '\xAC00' && c <= '\xD7FB' -> 2 -- | c >= '\xD800' && c <= '\xDFFF' -> 1 -- | c >= '\xE000' && c <= '\xF8FF' -> 1 -- ambiguous -- | c >= '\xF900' && c <= '\xFAFF' -> 2 -- | c >= '\xFB00' && c <= '\xFDFD' -> 1 -- | c >= '\xFE00' && c <= '\xFE0F' -> 1 -- ambiguous -- | c >= '\xFE10' && c <= '\xFE19' -> 2 -- | c >= '\xFE20' && c <= '\xFE26' -> 1 -- | c >= '\xFE30' && c <= '\xFE6B' -> 2 -- | c >= '\xFE70' && c <= '\xFEFF' -> 1 -- | c >= '\xFF01' && c <= '\xFF60' -> 2 -- | c >= '\xFF61' && c <= '\x16A38' -> 1 -- | c >= '\x1B000' && c <= '\x1B001' -> 2 -- | c >= '\x1D000' && c <= '\x1F1FF' -> 1 -- | c >= '\x1F200' && c <= '\x1F251' -> 2 -- | c >= '\x1F300' && c <= '\x1F773' -> 1 -- | c >= '\x20000' && c <= '\x3FFFD' -> 2 -- | otherwise -> 1 hledger-lib-1.2/Hledger/Utils/Tree.hs0000644000000000000000000000525613035210046015627 0ustar0000000000000000module Hledger.Utils.Tree where -- import Data.Char import Data.List (foldl') import qualified Data.Map as M import Data.Tree -- import Text.Megaparsec -- import Text.Printf import Hledger.Utils.Regex -- import Hledger.Utils.UTF8IOCompat (error') -- standard tree helpers root = rootLabel subs = subForest branches = subForest -- | List just the leaf nodes of a tree leaves :: Tree a -> [a] leaves (Node v []) = [v] leaves (Node _ branches) = concatMap leaves branches -- | get the sub-tree rooted at the first (left-most, depth-first) occurrence -- of the specified node value subtreeat :: Eq a => a -> Tree a -> Maybe (Tree a) subtreeat v t | root t == v = Just t | otherwise = subtreeinforest v $ subs t -- | get the sub-tree for the specified node value in the first tree in -- forest in which it occurs. 
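-- For example (illustrative, result assumed; 'Node' is from Data.Tree):
--
--   subtreeinforest 'b' [Node 'a' [Node 'b' []], Node 'c' []]
--     ~>  Just (Node 'b' [])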
subtreeinforest :: Eq a => a -> [Tree a] -> Maybe (Tree a) subtreeinforest _ [] = Nothing subtreeinforest v (t:ts) = case (subtreeat v t) of Just t' -> Just t' Nothing -> subtreeinforest v ts -- | remove all nodes past a certain depth treeprune :: Int -> Tree a -> Tree a treeprune 0 t = Node (root t) [] treeprune d t = Node (root t) (map (treeprune $ d-1) $ branches t) -- | apply f to all tree nodes treemap :: (a -> b) -> Tree a -> Tree b treemap f t = Node (f $ root t) (map (treemap f) $ branches t) -- | remove all subtrees whose nodes do not fulfill predicate treefilter :: (a -> Bool) -> Tree a -> Tree a treefilter f t = Node (root t) (map (treefilter f) $ filter (treeany f) $ branches t) -- | is predicate true in any node of tree ? treeany :: (a -> Bool) -> Tree a -> Bool treeany f t = f (root t) || any (treeany f) (branches t) -- treedrop -- remove the leaves which do fulfill predicate. -- treedropall -- do this repeatedly. -- | show a compact ascii representation of a tree showtree :: Show a => Tree a -> String showtree = unlines . filter (regexMatches "[^ \\|]") . lines . drawTree . treemap show -- | show a compact ascii representation of a forest showforest :: Show a => Forest a -> String showforest = concatMap showtree -- | An efficient-to-build tree suggested by Cale Gibbard, probably -- better than accountNameTreeFrom. newtype FastTree a = T (M.Map a (FastTree a)) deriving (Show, Eq, Ord) emptyTree = T M.empty mergeTrees :: (Ord a) => FastTree a -> FastTree a -> FastTree a mergeTrees (T m) (T m') = T (M.unionWith mergeTrees m m') treeFromPath :: [a] -> FastTree a treeFromPath [] = T M.empty treeFromPath (x:xs) = T (M.singleton x (treeFromPath xs)) treeFromPaths :: (Ord a) => [[a]] -> FastTree a treeFromPaths = foldl' mergeTrees emptyTree . map treeFromPath hledger-lib-1.2/Hledger/Utils/UTF8IOCompat.hs0000644000000000000000000001024513066746043017063 0ustar0000000000000000{-# LANGUAGE CPP #-} {- | UTF-8 aware string IO functions that will work across multiple platforms and GHC versions. Includes code from Text.Pandoc.UTF8 ((C) 2010 John MacFarlane). Example usage: import Prelude hiding (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import UTF8IOCompat (readFile,writeFile,appendFile,getContents,putStr,putStrLn) import UTF8IOCompat (SystemString,fromSystemString,toSystemString,error',userError') 2013/4/10 update: we now trust that current GHC versions & platforms do the right thing, so this file is a no-op and on its way to being removed. Not carefully tested. -} -- TODO obsolete ? module Hledger.Utils.UTF8IOCompat ( readFile, writeFile, appendFile, getContents, hGetContents, putStr, putStrLn, hPutStr, hPutStrLn, -- SystemString, fromSystemString, toSystemString, error', userError', usageError, ) where -- import Control.Monad (liftM) -- import qualified Data.ByteString.Lazy as B -- import qualified Data.ByteString.Lazy.Char8 as B8 -- import qualified Data.ByteString.Lazy.UTF8 as U8 (toString, fromString) import Prelude hiding (readFile, writeFile, appendFile, getContents, putStr, putStrLn) import System.IO -- (Handle) -- #if __GLASGOW_HASKELL__ < 702 -- import Codec.Binary.UTF8.String as UTF8 (decodeString, encodeString, isUTF8Encoded) -- import System.Info (os) -- #endif -- bom :: B.ByteString -- bom = B.pack [0xEF, 0xBB, 0xBF] -- stripBOM :: B.ByteString -> B.ByteString -- stripBOM s | bom `B.isPrefixOf` s = B.drop 3 s -- stripBOM s = s -- readFile :: FilePath -> IO String -- readFile = liftM (U8.toString . stripBOM) . 
B.readFile -- writeFile :: FilePath -> String -> IO () -- writeFile f = B.writeFile f . U8.fromString -- appendFile :: FilePath -> String -> IO () -- appendFile f = B.appendFile f . U8.fromString -- getContents :: IO String -- getContents = liftM (U8.toString . stripBOM) B.getContents -- hGetContents :: Handle -> IO String -- hGetContents h = liftM (U8.toString . stripBOM) (B.hGetContents h) -- putStr :: String -> IO () -- putStr = bs_putStr . U8.fromString -- putStrLn :: String -> IO () -- putStrLn = bs_putStrLn . U8.fromString -- hPutStr :: Handle -> String -> IO () -- hPutStr h = bs_hPutStr h . U8.fromString -- hPutStrLn :: Handle -> String -> IO () -- hPutStrLn h = bs_hPutStrLn h . U8.fromString -- -- span GHC versions including 6.12.3 - 7.4.1: -- bs_putStr = B8.putStr -- bs_putStrLn = B8.putStrLn -- bs_hPutStr = B8.hPut -- bs_hPutStrLn h bs = B8.hPut h bs >> B8.hPut h (B.singleton 0x0a) -- | A string received from or being passed to the operating system, such -- as a file path, command-line argument, or environment variable name or -- value. With GHC versions before 7.2 on some platforms (posix) these are -- typically encoded. When converting, we assume the encoding is UTF-8 (cf -- ). type SystemString = String -- | Convert a system string to an ordinary string, decoding from UTF-8 if -- it appears to be UTF8-encoded and GHC version is less than 7.2. fromSystemString :: SystemString -> String -- #if __GLASGOW_HASKELL__ < 702 -- fromSystemString s = if UTF8.isUTF8Encoded s then UTF8.decodeString s else s -- #else fromSystemString = id -- #endif -- | Convert a unicode string to a system string, encoding with UTF-8 if -- we are on a posix platform with GHC < 7.2. toSystemString :: String -> SystemString -- #if __GLASGOW_HASKELL__ < 702 -- toSystemString = case os of -- "unix" -> UTF8.encodeString -- "linux" -> UTF8.encodeString -- "darwin" -> UTF8.encodeString -- _ -> id -- #else toSystemString = id -- #endif -- | A SystemString-aware version of error. error' :: String -> a error' = #if __GLASGOW_HASKELL__ < 800 -- (easier than if base < 4.9) error . toSystemString #else errorWithoutStackTrace . toSystemString #endif -- | A SystemString-aware version of userError. userError' :: String -> IOError userError' = userError . toSystemString -- | A SystemString-aware version of error that adds a usage hint. usageError :: String -> a usageError = error' . (++ " (use -h to see usage)") hledger-lib-1.2/doc/hledger_csv.50000644000000000000000000001407413067574770015066 0ustar0000000000000000 .TH "hledger_csv" "5" "March 2017" "hledger 1.2" "hledger User Manuals" .SH NAME .PP CSV \- how hledger reads CSV data, and the CSV rules file format .SH DESCRIPTION .PP hledger can read CSV files, converting each CSV record into a journal entry (transaction), if you provide some conversion hints in a "rules file". This file should be named like the CSV file with an additional \f[C]\&.rules\f[] suffix (eg: \f[C]mybank.csv.rules\f[]); or, you can specify the file with \f[C]\-\-rules\-file\ PATH\f[]. hledger will create it if necessary, with some default rules which you\[aq]ll need to adjust. At minimum, the rules file must specify the \f[C]date\f[] and \f[C]amount\f[] fields. For an example, see How to read CSV files. .PP To learn about \f[I]exporting\f[] CSV, see CSV output. .SH CSV RULES .PP The following six kinds of rule can appear in the rules file, in any order. Blank lines and lines beginning with \f[C]#\f[] or \f[C];\f[] are ignored. 
.SS skip .PP \f[C]skip\f[]\f[I]\f[C]N\f[]\f[] .PP Skip this number of CSV records at the beginning. You\[aq]ll need this whenever your CSV data contains header lines. Eg: .IP .nf \f[C] #\ ignore\ the\ first\ CSV\ line skip\ 1 \f[] .fi .SS date\-format .PP \f[C]date\-format\f[]\f[I]\f[C]DATEFMT\f[]\f[] .PP When your CSV date fields are not formatted like \f[C]YYYY/MM/DD\f[] (or \f[C]YYYY\-MM\-DD\f[] or \f[C]YYYY.MM.DD\f[]), you\[aq]ll need to specify the format. DATEFMT is a strptime\-like date parsing pattern, which must parse the date field values completely. Examples: .IP .nf \f[C] #\ for\ dates\ like\ "6/11/2013": date\-format\ %\-d/%\-m/%Y \f[] .fi .IP .nf \f[C] #\ for\ dates\ like\ "11/06/2013": date\-format\ %m/%d/%Y \f[] .fi .IP .nf \f[C] #\ for\ dates\ like\ "2013\-Nov\-06": date\-format\ %Y\-%h\-%d \f[] .fi .IP .nf \f[C] #\ for\ dates\ like\ "11/6/2013\ 11:32\ PM": date\-format\ %\-m/%\-d/%Y\ %l:%M\ %p \f[] .fi .SS field list .PP \f[C]fields\f[]\f[I]\f[C]FIELDNAME1\f[]\f[], \f[I]\f[C]FIELDNAME2\f[]\f[]... .PP This (a) names the CSV fields, in order (names may not contain whitespace; uninteresting names may be left blank), and (b) assigns them to journal entry fields if you use any of these standard field names: \f[C]date\f[], \f[C]date2\f[], \f[C]status\f[], \f[C]code\f[], \f[C]description\f[], \f[C]comment\f[], \f[C]account1\f[], \f[C]account2\f[], \f[C]amount\f[], \f[C]amount\-in\f[], \f[C]amount\-out\f[], \f[C]currency\f[]. Eg: .IP .nf \f[C] #\ use\ the\ 1st,\ 2nd\ and\ 4th\ CSV\ fields\ as\ the\ entry\[aq]s\ date,\ description\ and\ amount, #\ and\ give\ the\ 7th\ and\ 8th\ fields\ meaningful\ names\ for\ later\ reference: # #\ CSV\ field: #\ \ \ \ \ \ 1\ \ \ \ \ 2\ \ \ \ \ \ \ \ \ \ \ \ 3\ 4\ \ \ \ \ \ \ 5\ 6\ 7\ \ \ \ \ \ \ \ \ \ 8 #\ entry\ field: fields\ date,\ description,\ ,\ amount,\ ,\ ,\ somefield,\ anotherfield \f[] .fi .SS field assignment .PP \f[I]\f[C]ENTRYFIELDNAME\f[]\f[] \f[I]\f[C]FIELDVALUE\f[]\f[] .PP This sets a journal entry field (one of the standard names above) to the given text value, which can include CSV field values interpolated by name (\f[C]%CSVFIELDNAME\f[]) or 1\-based position (\f[C]%N\f[]). Eg: .IP .nf \f[C] #\ set\ the\ amount\ to\ the\ 4th\ CSV\ field\ with\ "USD\ "\ prepended amount\ USD\ %4 \f[] .fi .IP .nf \f[C] #\ combine\ three\ fields\ to\ make\ a\ comment\ (containing\ two\ tags) comment\ note:\ %somefield\ \-\ %anotherfield,\ date:\ %1 \f[] .fi .PP Field assignments can be used instead of or in addition to a field list. .SS conditional block .PP \f[C]if\f[] \f[I]\f[C]PATTERN\f[]\f[] .PD 0 .P .PD \ \ \ \ \f[I]\f[C]FIELDASSIGNMENTS\f[]\f[]... .PP \f[C]if\f[] .PD 0 .P .PD \f[I]\f[C]PATTERN\f[]\f[] .PD 0 .P .PD \f[I]\f[C]PATTERN\f[]\f[]... .PD 0 .P .PD \ \ \ \ \f[I]\f[C]FIELDASSIGNMENTS\f[]\f[]... .PP This applies one or more field assignments, only to those CSV records matched by one of the PATTERNs. The patterns are case\-insensitive regular expressions which match anywhere within the whole CSV record (it\[aq]s not yet possible to match within a specific field). When there are multiple patterns they can be written on separate lines, unindented. The field assignments are on separate lines indented by at least one space. 
Examples: .IP .nf \f[C] #\ if\ the\ CSV\ record\ contains\ "groceries",\ set\ account2\ to\ "expenses:groceries" if\ groceries \ account2\ expenses:groceries \f[] .fi .IP .nf \f[C] #\ if\ the\ CSV\ record\ contains\ any\ of\ these\ patterns,\ set\ account2\ and\ comment\ as\ shown if monthly\ service\ fee atm\ transaction\ fee banking\ thru\ software \ account2\ expenses:business:banking \ comment\ \ XXX\ deductible\ ?\ check\ it \f[] .fi .SS include .PP \f[C]include\f[]\f[I]\f[C]RULESFILE\f[]\f[] .PP Include another rules file at this point. \f[C]RULESFILE\f[] is either an absolute file path or a path relative to the current file\[aq]s directory. Eg: .IP .nf \f[C] #\ rules\ reused\ with\ several\ CSV\ files include\ common.rules \f[] .fi .SH TIPS .PP Each generated journal entry will have two postings, to \f[C]account1\f[] and \f[C]account2\f[] respectively. Currently it\[aq]s not possible to generate entries with more than two postings. .PP If the CSV has debit/credit amounts in separate fields, assign to the \f[C]amount\-in\f[] and \f[C]amount\-out\f[] pseudo fields instead of \f[C]amount\f[]. .PP If the CSV has the currency in a separate field, assign that to the \f[C]currency\f[] pseudo field which will be automatically prepended to the amount. (Or you can do the same thing with a field assignment.) .PP If an amount value is parenthesised, it will be de\-parenthesised and sign\-flipped automatically. .PP The generated journal entries will be sorted by date. The original order of same\-day entries will be preserved, usually. .SH "REPORTING BUGS" Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) .SH AUTHORS Simon Michael and contributors .SH COPYRIGHT Copyright (C) 2007-2016 Simon Michael. .br Released under GNU GPL v3 or later. .SH SEE ALSO hledger(1), hledger\-ui(1), hledger\-web(1), hledger\-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_timedot(5), ledger(1) http://hledger.org hledger-lib-1.2/doc/hledger_csv.5.info0000644000000000000000000001401713067574765016021 0ustar0000000000000000This is hledger_csv.5.info, produced by makeinfo version 6.0 from stdin.  File: hledger_csv.5.info, Node: Top, Next: CSV RULES, Up: (dir) hledger_csv(5) hledger 1.2 ************************** hledger can read CSV files, converting each CSV record into a journal entry (transaction), if you provide some conversion hints in a "rules file". This file should be named like the CSV file with an additional '.rules' suffix (eg: 'mybank.csv.rules'); or, you can specify the file with '--rules-file PATH'. hledger will create it if necessary, with some default rules which you'll need to adjust. At minimum, the rules file must specify the 'date' and 'amount' fields. For an example, see How to read CSV files. To learn about _exporting_ CSV, see CSV output. * Menu: * CSV RULES:: * TIPS::  File: hledger_csv.5.info, Node: CSV RULES, Next: TIPS, Prev: Top, Up: Top 1 CSV RULES *********** The following six kinds of rule can appear in the rules file, in any order. Blank lines and lines beginning with '#' or ';' are ignored. * Menu: * skip:: * date-format:: * field list:: * field assignment:: * conditional block:: * include::  File: hledger_csv.5.info, Node: skip, Next: date-format, Up: CSV RULES 1.1 skip ======== 'skip'_'N'_ Skip this number of CSV records at the beginning. You'll need this whenever your CSV data contains header lines. 
Eg: # ignore the first CSV line skip 1  File: hledger_csv.5.info, Node: date-format, Next: field list, Prev: skip, Up: CSV RULES 1.2 date-format =============== 'date-format'_'DATEFMT'_ When your CSV date fields are not formatted like 'YYYY/MM/DD' (or 'YYYY-MM-DD' or 'YYYY.MM.DD'), you'll need to specify the format. DATEFMT is a strptime-like date parsing pattern, which must parse the date field values completely. Examples: # for dates like "6/11/2013": date-format %-d/%-m/%Y # for dates like "11/06/2013": date-format %m/%d/%Y # for dates like "2013-Nov-06": date-format %Y-%h-%d # for dates like "11/6/2013 11:32 PM": date-format %-m/%-d/%Y %l:%M %p  File: hledger_csv.5.info, Node: field list, Next: field assignment, Prev: date-format, Up: CSV RULES 1.3 field list ============== 'fields'_'FIELDNAME1'_, _'FIELDNAME2'_... This (a) names the CSV fields, in order (names may not contain whitespace; uninteresting names may be left blank), and (b) assigns them to journal entry fields if you use any of these standard field names: 'date', 'date2', 'status', 'code', 'description', 'comment', 'account1', 'account2', 'amount', 'amount-in', 'amount-out', 'currency'. Eg: # use the 1st, 2nd and 4th CSV fields as the entry's date, description and amount, # and give the 7th and 8th fields meaningful names for later reference: # # CSV field: # 1 2 3 4 5 6 7 8 # entry field: fields date, description, , amount, , , somefield, anotherfield  File: hledger_csv.5.info, Node: field assignment, Next: conditional block, Prev: field list, Up: CSV RULES 1.4 field assignment ==================== _'ENTRYFIELDNAME'_ _'FIELDVALUE'_ This sets a journal entry field (one of the standard names above) to the given text value, which can include CSV field values interpolated by name ('%CSVFIELDNAME') or 1-based position ('%N'). Eg: # set the amount to the 4th CSV field with "USD " prepended amount USD %4 # combine three fields to make a comment (containing two tags) comment note: %somefield - %anotherfield, date: %1 Field assignments can be used instead of or in addition to a field list.  File: hledger_csv.5.info, Node: conditional block, Next: include, Prev: field assignment, Up: CSV RULES 1.5 conditional block ===================== 'if' _'PATTERN'_ _'FIELDASSIGNMENTS'_... 'if' _'PATTERN'_ _'PATTERN'_... _'FIELDASSIGNMENTS'_... This applies one or more field assignments, only to those CSV records matched by one of the PATTERNs. The patterns are case-insensitive regular expressions which match anywhere within the whole CSV record (it's not yet possible to match within a specific field). When there are multiple patterns they can be written on separate lines, unindented. The field assignments are on separate lines indented by at least one space. Examples: # if the CSV record contains "groceries", set account2 to "expenses:groceries" if groceries account2 expenses:groceries # if the CSV record contains any of these patterns, set account2 and comment as shown if monthly service fee atm transaction fee banking thru software account2 expenses:business:banking comment XXX deductible ? check it  File: hledger_csv.5.info, Node: include, Prev: conditional block, Up: CSV RULES 1.6 include =========== 'include'_'RULESFILE'_ Include another rules file at this point. 'RULESFILE' is either an absolute file path or a path relative to the current file's directory. 
Eg: # rules reused with several CSV files include common.rules  File: hledger_csv.5.info, Node: TIPS, Prev: CSV RULES, Up: Top 2 TIPS ****** Each generated journal entry will have two postings, to 'account1' and 'account2' respectively. Currently it's not possible to generate entries with more than two postings. If the CSV has debit/credit amounts in separate fields, assign to the 'amount-in' and 'amount-out' pseudo fields instead of 'amount'. If the CSV has the currency in a separate field, assign that to the 'currency' pseudo field which will be automatically prepended to the amount. (Or you can do the same thing with a field assignment.) If an amount value is parenthesised, it will be de-parenthesised and sign-flipped automatically. The generated journal entries will be sorted by date. The original order of same-day entries will be preserved, usually.  Tag Table: Node: Top74 Node: CSV RULES800 Ref: #csv-rules906 Node: skip1149 Ref: #skip1245 Node: date-format1417 Ref: #date-format1546 Node: field list2052 Ref: #field-list2191 Node: field assignment2886 Ref: #field-assignment3043 Node: conditional block3547 Ref: #conditional-block3703 Node: include4599 Ref: #include4710 Node: TIPS4941 Ref: #tips5025  End Tag Table hledger-lib-1.2/doc/hledger_csv.5.txt0000644000000000000000000001400213067574770015673 0ustar0000000000000000 hledger_csv(5) hledger User Manuals hledger_csv(5) NAME CSV - how hledger reads CSV data, and the CSV rules file format DESCRIPTION hledger can read CSV files, converting each CSV record into a journal entry (transaction), if you provide some conversion hints in a "rules file". This file should be named like the CSV file with an additional .rules suffix (eg: mybank.csv.rules); or, you can specify the file with --rules-file PATH. hledger will create it if necessary, with some default rules which you'll need to adjust. At minimum, the rules file must specify the date and amount fields. For an example, see How to read CSV files. To learn about exporting CSV, see CSV output. CSV RULES The following six kinds of rule can appear in the rules file, in any order. Blank lines and lines beginning with # or ; are ignored. skip skipN Skip this number of CSV records at the beginning. You'll need this whenever your CSV data contains header lines. Eg: # ignore the first CSV line skip 1 date-format date-formatDATEFMT When your CSV date fields are not formatted like YYYY/MM/DD (or YYYY-MM-DD or YYYY.MM.DD), you'll need to specify the format. DATEFMT is a strptime-like date parsing pattern, which must parse the date field values completely. Examples: # for dates like "6/11/2013": date-format %-d/%-m/%Y # for dates like "11/06/2013": date-format %m/%d/%Y # for dates like "2013-Nov-06": date-format %Y-%h-%d # for dates like "11/6/2013 11:32 PM": date-format %-m/%-d/%Y %l:%M %p field list fieldsFIELDNAME1, FIELDNAME2... This (a) names the CSV fields, in order (names may not contain white- space; uninteresting names may be left blank), and (b) assigns them to journal entry fields if you use any of these standard field names: date, date2, status, code, description, comment, account1, account2, amount, amount-in, amount-out, currency. 
Eg: # use the 1st, 2nd and 4th CSV fields as the entry's date, description and amount, # and give the 7th and 8th fields meaningful names for later reference: # # CSV field: # 1 2 3 4 5 6 7 8 # entry field: fields date, description, , amount, , , somefield, anotherfield field assignment ENTRYFIELDNAME FIELDVALUE This sets a journal entry field (one of the standard names above) to the given text value, which can include CSV field values interpolated by name (%CSVFIELDNAME) or 1-based position (%N). Eg: # set the amount to the 4th CSV field with "USD " prepended amount USD %4 # combine three fields to make a comment (containing two tags) comment note: %somefield - %anotherfield, date: %1 Field assignments can be used instead of or in addition to a field list. conditional block if PATTERN FIELDASSIGNMENTS... if PATTERN PATTERN... FIELDASSIGNMENTS... This applies one or more field assignments, only to those CSV records matched by one of the PATTERNs. The patterns are case-insensitive reg- ular expressions which match anywhere within the whole CSV record (it's not yet possible to match within a specific field). When there are multiple patterns they can be written on separate lines, unindented. The field assignments are on separate lines indented by at least one space. Examples: # if the CSV record contains "groceries", set account2 to "expenses:groceries" if groceries account2 expenses:groceries # if the CSV record contains any of these patterns, set account2 and comment as shown if monthly service fee atm transaction fee banking thru software account2 expenses:business:banking comment XXX deductible ? check it include includeRULESFILE Include another rules file at this point. RULESFILE is either an abso- lute file path or a path relative to the current file's directory. Eg: # rules reused with several CSV files include common.rules TIPS Each generated journal entry will have two postings, to account1 and account2 respectively. Currently it's not possible to generate entries with more than two postings. If the CSV has debit/credit amounts in separate fields, assign to the amount-in and amount-out pseudo fields instead of amount. If the CSV has the currency in a separate field, assign that to the currency pseudo field which will be automatically prepended to the amount. (Or you can do the same thing with a field assignment.) If an amount value is parenthesised, it will be de-parenthesised and sign-flipped automatically. The generated journal entries will be sorted by date. The original order of same-day entries will be preserved, usually. REPORTING BUGS Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) AUTHORS Simon Michael and contributors COPYRIGHT Copyright (C) 2007-2016 Simon Michael. Released under GNU GPL v3 or later. SEE ALSO hledger(1), hledger-ui(1), hledger-web(1), hledger-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_time- dot(5), ledger(1) http://hledger.org hledger 1.2 March 2017 hledger_csv(5) hledger-lib-1.2/doc/hledger_journal.50000644000000000000000000007545013067574771015753 0ustar0000000000000000.\"t .TH "hledger_journal" "5" "March 2017" "hledger 1.2" "hledger User Manuals" .SH NAME .PP Journal \- hledger\[aq]s default file format, representing a General Journal .SH DESCRIPTION .PP hledger\[aq]s usual data source is a plain text file containing journal entries in hledger journal format. This file represents a standard accounting general journal. 
I use file names ending in \f[C]\&.journal\f[], but that\[aq]s not required. The journal file contains a number of transaction entries, each describing a transfer of money (or any commodity) between two or more named accounts, in a simple format readable by both hledger and humans. .PP hledger\[aq]s journal format is a compatible subset, mostly, of ledger\[aq]s journal format, so hledger can work with compatible ledger journal files as well. It\[aq]s safe, and encouraged, to run both hledger and ledger on the same journal file, eg to validate the results you\[aq]re getting. .PP You can use hledger without learning any more about this file; just use the add or web commands to create and update it. Many users, though, also edit the journal file directly with a text editor, perhaps assisted by the helper modes for emacs or vim. .PP Here\[aq]s an example: .IP .nf \f[C] ;\ A\ sample\ journal\ file.\ This\ is\ a\ comment. 2008/01/01\ income\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ ;\ <\-\ transaction\[aq]s\ first\ line\ starts\ in\ column\ 0,\ contains\ date\ and\ description \ \ \ \ assets:bank:checking\ \ $1\ \ \ \ ;\ <\-\ posting\ lines\ start\ with\ whitespace,\ each\ contains\ an\ account\ name \ \ \ \ income:salary\ \ \ \ \ \ \ \ $\-1\ \ \ \ ;\ \ \ \ followed\ by\ at\ least\ two\ spaces\ and\ an\ amount 2008/06/01\ gift \ \ \ \ assets:bank:checking\ \ $1\ \ \ \ ;\ <\-\ at\ least\ two\ postings\ in\ a\ transaction \ \ \ \ income:gifts\ \ \ \ \ \ \ \ \ $\-1\ \ \ \ ;\ <\-\ their\ amounts\ must\ balance\ to\ 0 2008/06/02\ save \ \ \ \ assets:bank:saving\ \ \ \ $1 \ \ \ \ assets:bank:checking\ \ \ \ \ \ \ \ ;\ <\-\ one\ amount\ may\ be\ omitted;\ here\ $\-1\ is\ inferred 2008/06/03\ eat\ &\ shop\ \ \ \ \ \ \ \ \ \ \ ;\ <\-\ description\ can\ be\ anything \ \ \ \ expenses:food\ \ \ \ \ \ \ \ \ $1 \ \ \ \ expenses:supplies\ \ \ \ \ $1\ \ \ \ ;\ <\-\ this\ transaction\ debits\ two\ expense\ accounts \ \ \ \ assets:cash\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ ;\ <\-\ $\-2\ inferred 2008/12/31\ *\ pay\ off\ \ \ \ \ \ \ \ \ \ \ \ ;\ <\-\ an\ optional\ *\ or\ !\ after\ the\ date\ means\ "cleared"\ (or\ anything\ you\ want) \ \ \ \ liabilities:debts\ \ \ \ \ $1 \ \ \ \ assets:bank:checking \f[] .fi .SH FILE FORMAT .SS Transactions .PP Transactions are represented by journal entries. Each begins with a simple date in column 0, followed by three optional fields with spaces between them: .IP \[bu] 2 a status flag, which can be empty or \f[C]!\f[] or \f[C]*\f[] (meaning "uncleared", "pending" and "cleared", or whatever you want) .IP \[bu] 2 a transaction code (eg a check number), .IP \[bu] 2 and/or a description .PP then some number of postings, of some amount to some account. Each posting is on its own line, consisting of: .IP \[bu] 2 indentation of one or more spaces (or tabs) .IP \[bu] 2 optionally, a \f[C]!\f[] or \f[C]*\f[] status flag followed by a space .IP \[bu] 2 an account name, optionally containing single spaces .IP \[bu] 2 optionally, two or more spaces or tabs followed by an amount .PP Usually there are two or more postings, though one or none is also possible. The posting amounts within a transaction must always balance, ie add up to 0. Optionally one amount can be left blank, in which case it will be inferred. .SS Dates .SS Simple dates .PP Within a journal file, transaction dates use Y/M/D (or Y\-M\-D or Y.M.D) Leading zeros are optional. 
The year may be omitted, in which case it will be inferred from the context \- the current transaction, the default year set with a default year directive, or the current date when the command is run. Some examples: \f[C]2010/01/31\f[], \f[C]1/31\f[], \f[C]2010\-01\-31\f[], \f[C]2010.1.31\f[]. .SS Secondary dates .PP Real\-life transactions sometimes involve more than one date \- eg the date you write a cheque, and the date it clears in your bank. When you want to model this, eg for more accurate balances, you can specify individual posting dates, which I recommend. Or, you can use the secondary dates (aka auxiliary/effective dates) feature, supported for compatibility with Ledger. .PP A secondary date can be written after the primary date, separated by an equals sign. The primary date, on the left, is used by default; the secondary date, on the right, is used when the \f[C]\-\-date2\f[] flag is specified (\f[C]\-\-aux\-date\f[] or \f[C]\-\-effective\f[] also work). .PP The meaning of secondary dates is up to you, but it\[aq]s best to follow a consistent rule. Eg write the bank\[aq]s clearing date as primary, and when needed, the date the transaction was initiated as secondary. .PP Here\[aq]s an example. Note that a secondary date will use the year of the primary date if unspecified. .IP .nf \f[C] 2010/2/23=2/19\ movie\ ticket \ \ expenses:cinema\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $10 \ \ assets:checking \f[] .fi .IP .nf \f[C] $\ hledger\ register\ checking 2010/02/23\ movie\ ticket\ \ \ \ \ \ \ \ \ assets:checking\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $\-10\ \ \ \ \ \ \ \ \ $\-10 \f[] .fi .IP .nf \f[C] $\ hledger\ register\ checking\ \-\-date2 2010/02/19\ movie\ ticket\ \ \ \ \ \ \ \ \ assets:checking\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $\-10\ \ \ \ \ \ \ \ \ $\-10 \f[] .fi .PP Secondary dates require some effort; you must use them consistently in your journal entries and remember whether to use or not use the \f[C]\-\-date2\f[] flag for your reports. They are included in hledger for Ledger compatibility, but posting dates are a more powerful and less confusing alternative. .SS Posting dates .PP You can give individual postings a different date from their parent transaction, by adding a posting comment containing a tag (see below) like \f[C]date:DATE\f[]. This is probably the best way to control posting dates precisely. Eg in this example the expense should appear in May reports, and the deduction from checking should be reported on 6/1 for easy bank reconciliation: .IP .nf \f[C] 2015/5/30 \ \ \ \ expenses:food\ \ \ \ \ $10\ \ \ ;\ food\ purchased\ on\ saturday\ 5/30 \ \ \ \ assets:checking\ \ \ \ \ \ \ \ \ ;\ bank\ cleared\ it\ on\ monday,\ date:6/1 \f[] .fi .IP .nf \f[C] $\ hledger\ \-f\ t.j\ register\ food 2015/05/30\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ expenses:food\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $10\ \ \ \ \ \ \ \ \ \ \ $10 \f[] .fi .IP .nf \f[C] $\ hledger\ \-f\ t.j\ register\ checking 2015/06/01\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ assets:checking\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $\-10\ \ \ \ \ \ \ \ \ \ $\-10 \f[] .fi .PP DATE should be a simple date; if the year is not specified it will use the year of the transaction\[aq]s date. You can set the secondary date similarly, with \f[C]date2:DATE2\f[]. The \f[C]date:\f[] or \f[C]date2:\f[] tags must have a valid simple date value if they are present, eg a \f[C]date:\f[] tag with no value is not allowed. 
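.PP
For example, a posting might carry both a posting date and a secondary posting date (an illustrative sketch; the dates are made up, using the \f[C]date:\f[] and \f[C]date2:\f[] tag syntax described above):
.IP
.nf
\f[C]
2015/5/30
\ \ \ \ expenses:food\ \ \ \ \ $10
\ \ \ \ assets:checking\ \ \ \ \ \ \ \ \ ;\ date:6/1,\ date2:6/2
\f[]
.fi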
.PP Ledger\[aq]s earlier, more compact bracketed date syntax is also supported: \f[C][DATE]\f[], \f[C][DATE=DATE2]\f[] or \f[C][=DATE2]\f[]. hledger will attempt to parse any square\-bracketed sequence of the \f[C]0123456789/\-.=\f[] characters in this way. With this syntax, DATE infers its year from the transaction and DATE2 infers its year from DATE. .SS Account names .PP Account names typically have several parts separated by a full colon, from which hledger derives a hierarchical chart of accounts. They can be anything you like, but in finance there are traditionally five top\-level accounts: \f[C]assets\f[], \f[C]liabilities\f[], \f[C]income\f[], \f[C]expenses\f[], and \f[C]equity\f[]. .PP Account names may contain single spaces, eg: \f[C]assets:accounts\ receivable\f[]. Because of this, they must always be followed by \f[B]two or more spaces\f[] (or newline). .PP Account names can be aliased. .SS Amounts .PP After the account name, there is usually an amount. Important: between account name and amount, there must be \f[B]two or more spaces\f[]. .PP Amounts consist of a number and (usually) a currency symbol or commodity name. Some examples: .PP \f[C]2.00001\f[] .PD 0 .P .PD \f[C]$1\f[] .PD 0 .P .PD \f[C]4000\ AAPL\f[] .PD 0 .P .PD \f[C]3\ "green\ apples"\f[] .PD 0 .P .PD \f[C]\-$1,000,000.00\f[] .PD 0 .P .PD \f[C]INR\ 9,99,99,999.00\f[] .PD 0 .P .PD \f[C]EUR\ \-2.000.000,00\f[] .PP As you can see, the amount format is somewhat flexible: .IP \[bu] 2 amounts are a number (the "quantity") and optionally a currency symbol/commodity name (the "commodity"). .IP \[bu] 2 the commodity is a symbol, word, or phrase, on the left or right, with or without a separating space. If the commodity contains numbers, spaces or non\-word punctuation it must be enclosed in double quotes. .IP \[bu] 2 negative amounts with a commodity on the left can have the minus sign before or after it .IP \[bu] 2 digit groups (thousands, or any other grouping) can be separated by commas (in which case period is used for decimal point) or periods (in which case comma is used for decimal point) .PP You can use any of these variations when recording data, but when hledger displays amounts, it will choose a consistent format for each commodity. (Except for price amounts, which are always formatted as written). The display format is chosen as follows: .IP \[bu] 2 if there is a commodity directive specifying the format, that is used .IP \[bu] 2 otherwise the format is inferred from the first posting amount in that commodity in the journal, and the precision (number of decimal places) will be the maximum from all posting amounts in that commmodity .IP \[bu] 2 or if there are no such amounts in the journal, a default format is used (like \f[C]$1000.00\f[]). .PP Price amounts and amounts in D directives usually don\[aq]t affect amount format inference, but in some situations they can do so indirectly. (Eg when D\[aq]s default commodity is applied to a commodity\-less amount, or when an amountless posting is balanced using a price\[aq]s commodity, or when \-V is used.) If you find this causing problems, set the desired format with a commodity directive. .SS Virtual Postings .PP When you parenthesise the account name in a posting, we call that a \f[I]virtual posting\f[], which means: .IP \[bu] 2 it is ignored when checking that the transaction is balanced .IP \[bu] 2 it is excluded from reports when the \f[C]\-\-real/\-R\f[] flag is used, or the \f[C]real:1\f[] query. 
.PP You could use this, eg, to set an account\[aq]s opening balance without needing to use the \f[C]equity:opening\ balances\f[] account: .IP .nf \f[C] 1/1\ special\ unbalanced\ posting\ to\ set\ initial\ balance \ \ (assets:checking)\ \ \ $1000 \f[] .fi .PP When the account name is bracketed, we call it a \f[I]balanced virtual posting\f[]. This is like an ordinary virtual posting except the balanced virtual postings in a transaction must balance to 0, like the real postings (but separately from them). Balanced virtual postings are also excluded by \f[C]\-\-real/\-R\f[] or \f[C]real:1\f[]. .IP .nf \f[C] 1/1\ buy\ food\ with\ cash,\ and\ update\ some\ budget\-tracking\ subaccounts\ elsewhere \ \ expenses:food\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $10 \ \ assets:cash\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $\-10 \ \ [assets:checking:available]\ \ \ \ \ $10 \ \ [assets:checking:budget:food]\ \ $\-10 \f[] .fi .PP Virtual postings have some legitimate uses, but those are few. You can usually find an equivalent journal entry using real postings, which is more correct and provides better error checking. .SS Balance Assertions .PP hledger supports Ledger\-style balance assertions in journal files. These look like \f[C]=EXPECTEDBALANCE\f[] following a posting\[aq]s amount. Eg in this example we assert the expected dollar balance in accounts a and b after each posting: .IP .nf \f[C] 2013/1/1 \ \ a\ \ \ $1\ \ =$1 \ \ b\ \ \ \ \ \ \ =$\-1 2013/1/2 \ \ a\ \ \ $1\ \ =$2 \ \ b\ \ $\-1\ \ =$\-2 \f[] .fi .PP After reading a journal file, hledger will check all balance assertions and report an error if any of them fail. Balance assertions can protect you from, eg, inadvertently disrupting reconciled balances while cleaning up old entries. You can disable them temporarily with the \f[C]\-\-ignore\-assertions\f[] flag, which can be useful for troubleshooting or for reading Ledger files. .SS Assertions and ordering .PP hledger sorts an account\[aq]s postings and assertions first by date and then (for postings on the same day) by parse order. Note this is different from Ledger, which sorts assertions only by parse order. (Also, Ledger assertions do not see the accumulated effect of repeated postings to the same account within a transaction.) .PP So, hledger balance assertions keep working if you reorder differently\-dated transactions within the journal. But if you reorder same\-dated transactions or postings, assertions might break and require updating. This order dependence does bring an advantage: precise control over the order of postings and assertions within a day, so you can assert intra\-day balances. .SS Assertions and included files .PP With included files, things are a little more complicated. Including preserves the ordering of postings and assertions. If you have multiple postings to an account on the same day, split across different files, and you also want to assert the account\[aq]s balance on the same day, you\[aq]ll have to put the assertion in the right file. .SS Assertions and multiple \-f options .PP Balance assertions don\[aq]t work well across files specified with multiple \-f options. Use include or concatenate the files instead. .SS Assertions and commodities .PP The asserted balance must be a simple single\-commodity amount, and in fact the assertion checks only this commodity\[aq]s balance within the (possibly multi\-commodity) account balance. We could call this a partial balance assertion. 
This is compatible with Ledger, and makes it possible to make assertions about accounts containing multiple commodities. .PP To assert each commodity\[aq]s balance in such a multi\-commodity account, you can add multiple postings (with amount 0 if necessary). But note that no matter how many assertions you add, you can\[aq]t be sure the account does not contain some unexpected commodity. (We\[aq]ll add support for this kind of total balance assertion if there\[aq]s demand.) .SS Assertions and subaccounts .PP Balance assertions do not count the balance from subaccounts; they check the posted account\[aq]s exclusive balance. For example: .IP .nf \f[C] 1/1 \ \ checking:fund\ \ \ 1\ =\ 1\ \ ;\ post\ to\ this\ subaccount,\ its\ balance\ is\ now\ 1 \ \ checking\ \ \ \ \ \ \ \ 1\ =\ 1\ \ ;\ post\ to\ the\ parent\ account,\ its\ exclusive\ balance\ is\ now\ 1 \ \ equity \f[] .fi .PP The balance report\[aq]s flat mode shows these exclusive balances more clearly: .IP .nf \f[C] $\ hledger\ bal\ checking\ \-\-flat \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 1\ \ checking \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 1\ \ checking:fund \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 2 \f[] .fi .SS Assertions and virtual postings .PP Balance assertions are checked against all postings, both real and virtual. They are not affected by the \f[C]\-\-real/\-R\f[] flag or \f[C]real:\f[] query. .SS Balance Assignments .PP Ledger\-style balance assignments are also supported. These are like balance assertions, but with no posting amount on the left side of the equals sign; instead it is calculated automatically so as to satisfy the assertion. This can be a convenience during data entry, eg when setting opening balances: .IP .nf \f[C] ;\ starting\ a\ new\ journal,\ set\ asset\ account\ balances\ 2016/1/1\ opening\ balances \ \ assets:checking\ \ \ \ \ \ \ \ \ \ \ \ =\ $409.32 \ \ assets:savings\ \ \ \ \ \ \ \ \ \ \ \ \ =\ $735.24 \ \ assets:cash\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ =\ $42 \ \ equity:opening\ balances \f[] .fi .PP or when adjusting a balance to reality: .IP .nf \f[C] ;\ no\ cash\ left;\ update\ balance,\ record\ any\ untracked\ spending\ as\ a\ generic\ expense 2016/1/15 \ \ assets:cash\ \ \ \ =\ $0 \ \ expenses:misc \f[] .fi .PP The calculated amount depends on the account\[aq]s balance in the commodity at that point (which depends on the previously\-dated postings of the commodity to that account since the last balance assertion or assignment). Note that using balance assignments makes your journal a little less explicit; to know the exact amount posted, you have to run hledger or do the calculations yourself, instead of just reading it. .SS Prices .SS Transaction prices .PP Within a transaction posting, you can record an amount\[aq]s price in another commodity. This can be used to document the cost (for a purchase), or selling price (for a sale), or the exchange rate that was used, for this transaction. These transaction prices are fixed, and do not change over time. .PP Amounts with transaction prices can be displayed in the transaction price\[aq]s commodity, by using the \f[C]\-\-cost/\-B\f[] flag supported by most hledger commands (mnemonic: "cost Basis"). .PP There are several ways to record a transaction price: .IP "1." 3 Write the unit price (aka exchange rate), as \f[C]\@\ UNITPRICE\f[] after the amount: .RS 4 .IP .nf \f[C] 2009/1/1 \ \ assets:foreign\ currency\ \ \ €100\ \@\ $1.35\ \ ;\ one\ hundred\ euros\ at\ $1.35\ each \ \ assets:cash \f[] .fi .RE .IP "2." 
3 Or write the total price, as \f[C]\@\@\ TOTALPRICE\f[] after the amount: .RS 4 .IP .nf \f[C] 2009/1/1 \ \ assets:foreign\ currency\ \ \ €100\ \@\@\ $135\ \ ;\ one\ hundred\ euros\ at\ $135\ for\ the\ lot \ \ assets:cash \f[] .fi .RE .IP "3." 3 Or let hledger infer the price so as to balance the transaction. To permit this, you must fully specify all posting amounts, and their sum must have a non\-zero amount in exactly two commodities: .RS 4 .IP .nf \f[C] 2009/1/1 \ \ assets:foreign\ currency\ \ \ €100\ \ \ \ \ \ \ \ \ \ ;\ one\ hundred\ euros \ \ assets:cash\ \ \ \ \ \ \ \ \ \ \ \ \ \ $\-135\ \ \ \ \ \ \ \ \ \ ;\ exchanged\ for\ $135 \f[] .fi .RE .PP With any of the above examples we get: .IP .nf \f[C] $\ hledger\ print\ \-B 2009/01/01 \ \ \ \ assets:foreign\ currency\ \ \ \ \ \ \ $135.00 \ \ \ \ assets:cash\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $\-135.00 \f[] .fi .PP Example use for transaction prices: recording the effective conversion rate of purchases made in a foreign currency. .SS Market prices .PP Market prices are not tied to a particular transaction; they represent historical exchange rates between two commodities. (Ledger calls them historical prices.) For example, the prices published by a stock exchange or the foreign exchange market. Some commands (balance, currently) can use this information to show the market value of things at a given date. .PP To record market prices, use P directives in the main journal or in an included file. Their format is: .IP .nf \f[C] P\ DATE\ COMMODITYBEINGPRICED\ UNITPRICE \f[] .fi .PP DATE is a simple date as usual. COMMODITYBEINGPRICED is the symbol of the commodity being priced. UNITPRICE is an ordinary amount (symbol and quantity) in a second commodity, specifying the unit price or conversion rate for the first commodity in terms of the second, on the given date. .PP For example, the following directives say that one euro was worth 1.35 US dollars during 2009, and $1.40 from 2010 onward: .IP .nf \f[C] P\ 2009/1/1\ €\ $1.35 P\ 2010/1/1\ €\ $1.40 \f[] .fi .SS Comments .PP Lines in the journal beginning with a semicolon (\f[C];\f[]) or hash (\f[C]#\f[]) or asterisk (\f[C]*\f[]) are comments, and will be ignored. (Asterisk comments make it easy to treat your journal like an org\-mode outline in emacs.) .PP Also, anything between \f[C]comment\f[] and \f[C]end\ comment\f[] directives is a (multi\-line) comment. If there is no \f[C]end\ comment\f[], the comment extends to the end of the file. .PP You can attach comments to a transaction by writing them after the description and/or indented on the following lines (before the postings). Similarly, you can attach comments to an individual posting by writing them after the amount and/or indented on the following lines. .PP Some examples: .IP .nf \f[C] #\ a\ journal\ comment ;\ also\ a\ journal\ comment comment This\ is\ a\ multiline\ comment, which\ continues\ until\ a\ line where\ the\ "end\ comment"\ string appears\ on\ its\ own. end\ comment 2012/5/14\ something\ \ ;\ a\ transaction\ comment \ \ \ \ ;\ the\ transaction\ comment,\ continued \ \ \ \ posting1\ \ 1\ \ ;\ a\ comment\ for\ posting\ 1 \ \ \ \ posting2 \ \ \ \ ;\ a\ comment\ for\ posting\ 2 \ \ \ \ ;\ another\ comment\ line\ for\ posting\ 2 ;\ a\ journal\ comment\ (because\ not\ indented) \f[] .fi .SS Tags .PP Tags are a way to add extra labels or labelled data to postings and transactions, which you can then search or pivot on. 
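.PP
For example, entries carrying the \f[C]sometag:\f[] tag shown below could later be selected with a tag query (an illustrative command; see hledger(1) for query syntax):
.IP
.nf
\f[C]
$\ hledger\ register\ tag:sometag
\f[]
.fi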
.PP A simple tag is a word (which may contain hyphens) followed by a full colon, written inside a transaction or posting comment line: .IP .nf \f[C] 2017/1/16\ bought\ groceries\ \ \ \ ;\ sometag: \f[] .fi .PP Tags can have a value, which is the text after the colon, up to the next comma or end of line, with leading/trailing whitespace removed: .IP .nf \f[C] \ \ \ \ expenses:food\ \ \ \ $10\ \ \ ;\ a\-posting\-tag:\ the\ tag\ value \f[] .fi .PP Note this means hledger\[aq]s tag values can not contain commas or newlines. Ending at commas means you can write multiple short tags on one line, comma separated: .IP .nf \f[C] \ \ \ \ assets:checking\ \ \ \ \ \ \ ;\ a\ comment\ containing\ tag1:,\ tag2:\ some\ value\ ... \f[] .fi .PP Here, .IP \[bu] 2 "\f[C]a\ comment\ containing\f[]" is just comment text, not a tag .IP \[bu] 2 "\f[C]tag1\f[]" is a tag with no value .IP \[bu] 2 "\f[C]tag2\f[]" is another tag, whose value is "\f[C]some\ value\ ...\f[]" .PP Tags in a transaction comment affect the transaction and all of its postings, while tags in a posting comment affect only that posting. For example, the following transaction has three tags (\f[C]A\f[], \f[C]TAG2\f[], \f[C]third\-tag\f[]) and the posting has four (those plus \f[C]posting\-tag\f[]): .IP .nf \f[C] 1/1\ a\ transaction\ \ ;\ A:,\ TAG2: \ \ \ \ ;\ third\-tag:\ a\ third\ transaction\ tag,\ <\-\ with\ a\ value \ \ \ \ (a)\ \ $1\ \ ;\ posting\-tag: \f[] .fi .PP Tags are like Ledger\[aq]s metadata feature, except hledger\[aq]s tag values are simple strings. .SS Implicit tags .PP Some predefined "implicit" tags are also provided: .IP \[bu] 2 \f[C]code\f[] \- the transaction\[aq]s code field .IP \[bu] 2 \f[C]description\f[] \- the transaction\[aq]s description .IP \[bu] 2 \f[C]payee\f[] \- the part of description before \f[C]|\f[], or all of it .IP \[bu] 2 \f[C]note\f[] \- the part of description after \f[C]|\f[], or all of it .PP \f[C]payee\f[] and \f[C]note\f[] support descriptions written in a special \f[C]PAYEE\ |\ NOTE\f[] format, accessing the parts before and after the pipe character respectively. For descriptions not containing a pipe character they are the same as \f[C]description\f[]. .SS Directives .SS Account aliases .PP You can define aliases which rewrite your account names (after reading the journal, before generating reports). hledger\[aq]s account aliases can be useful for: .IP \[bu] 2 expanding shorthand account names to their full form, allowing easier data entry and a less verbose journal .IP \[bu] 2 adapting old journals to your current chart of accounts .IP \[bu] 2 experimenting with new account organisations, like a new hierarchy or combining two accounts into one .IP \[bu] 2 customising reports .PP See also Cookbook: rewrite account names. .SS Basic aliases .PP To set an account alias, use the \f[C]alias\f[] directive in your journal file. This affects all subsequent journal entries in the current file or its included files. The spaces around the = are optional: .IP .nf \f[C] alias\ OLD\ =\ NEW \f[] .fi .PP Or, you can use the \f[C]\-\-alias\ \[aq]OLD=NEW\[aq]\f[] option on the command line. This affects all entries. It\[aq]s useful for trying out aliases interactively. .PP OLD and NEW are full account names. hledger will replace any occurrence of the old account name with the new one. Subaccounts are also affected. 
Eg: .IP .nf \f[C] alias\ checking\ =\ assets:bank:wells\ fargo:checking #\ rewrites\ "checking"\ to\ "assets:bank:wells\ fargo:checking",\ or\ "checking:a"\ to\ "assets:bank:wells\ fargo:checking:a" \f[] .fi .SS Regex aliases .PP There is also a more powerful variant that uses a regular expression, indicated by the forward slashes. (This was the default behaviour in hledger 0.24\-0.25): .IP .nf \f[C] alias\ /REGEX/\ =\ REPLACEMENT \f[] .fi .PP or \f[C]\-\-alias\ \[aq]/REGEX/=REPLACEMENT\[aq]\f[]. .PP REGEX is a case\-insensitive regular expression. Anywhere it matches inside an account name, the matched part will be replaced by REPLACEMENT. If REGEX contains parenthesised match groups, these can be referenced by the usual numeric backreferences in REPLACEMENT. Note, currently regular expression aliases may cause noticeable slow\-downs. (And if you use Ledger on your hledger file, they will be ignored.) Eg: .IP .nf \f[C] alias\ /^(.+):bank:([^:]+)(.*)/\ =\ \\1:\\2\ \\3 #\ rewrites\ "assets:bank:wells\ fargo:checking"\ to\ \ "assets:wells\ fargo\ checking" \f[] .fi .SS Multiple aliases .PP You can define as many aliases as you like using directives or command\-line options. Aliases are recursive \- each alias sees the result of applying previous ones. (This is different from Ledger, where aliases are non\-recursive by default). Aliases are applied in the following order: .IP "1." 3 alias directives, most recently seen first (recent directives take precedence over earlier ones; directives not yet seen are ignored) .IP "2." 3 alias options, in the order they appear on the command line .SS end aliases .PP You can clear (forget) all currently defined aliases with the \f[C]end\ aliases\f[] directive: .IP .nf \f[C] end\ aliases \f[] .fi .SS account directive .PP The \f[C]account\f[] directive predefines account names, as in Ledger and Beancount. This may be useful for your own documentation; hledger doesn\[aq]t make use of it yet. .IP .nf \f[C] ;\ account\ ACCT ;\ \ \ OPTIONAL\ COMMENTS/TAGS... account\ assets:bank:checking \ a\ comment \ acct\-no:12345 account\ expenses:food ;\ etc. \f[] .fi .SS apply account directive .PP You can specify a parent account which will be prepended to all accounts within a section of the journal. Use the \f[C]apply\ account\f[] and \f[C]end\ apply\ account\f[] directives like so: .IP .nf \f[C] apply\ account\ home 2010/1/1 \ \ \ \ food\ \ \ \ $10 \ \ \ \ cash end\ apply\ account \f[] .fi .PP which is equivalent to: .IP .nf \f[C] 2010/01/01 \ \ \ \ home:food\ \ \ \ \ \ \ \ \ \ \ $10 \ \ \ \ home:cash\ \ \ \ \ \ \ \ \ \ $\-10 \f[] .fi .PP If \f[C]end\ apply\ account\f[] is omitted, the effect lasts to the end of the file. Included files are also affected, eg: .IP .nf \f[C] apply\ account\ business include\ biz.journal end\ apply\ account apply\ account\ personal include\ personal.journal \f[] .fi .PP Prior to hledger 1.0, legacy \f[C]account\f[] and \f[C]end\f[] spellings were also supported. .SS Multi\-line comments .PP A line containing just \f[C]comment\f[] starts a multi\-line comment, and a line containing just \f[C]end\ comment\f[] ends it. See comments. .SS commodity directive .PP The \f[C]commodity\f[] directive predefines commodities (currently this is just informational), and also it may define the display format for amounts in this commodity (overriding the automatically inferred format). 
.PP It may be written on a single line, like this: .IP .nf \f[C] ;\ commodity\ EXAMPLEAMOUNT ;\ display\ AAAA\ amounts\ with\ the\ symbol\ on\ the\ right,\ space\-separated, ;\ using\ period\ as\ decimal\ point,\ with\ four\ decimal\ places,\ and ;\ separating\ thousands\ with\ comma. commodity\ 1,000.0000\ AAAA \f[] .fi .PP or on multiple lines, using the "format" subdirective. In this case the commodity symbol appears twice and should be the same in both places: .IP .nf \f[C] ;\ commodity\ SYMBOL ;\ \ \ format\ EXAMPLEAMOUNT ;\ display\ indian\ rupees\ with\ currency\ name\ on\ the\ left, ;\ thousands,\ lakhs\ and\ crores\ comma\-separated, ;\ period\ as\ decimal\ point,\ and\ two\ decimal\ places. commodity\ INR \ \ format\ INR\ 9,99,99,999.00 \f[] .fi .SS Default commodity .PP The D directive sets a default commodity (and display format), to be used for amounts without a commodity symbol (ie, plain numbers). (Note this differs from Ledger\[aq]s default commodity directive.) The commodity and display format will be applied to all subsequent commodity\-less amounts, or until the next D directive. .IP .nf \f[C] #\ commodity\-less\ amounts\ should\ be\ treated\ as\ dollars #\ (and\ displayed\ with\ symbol\ on\ the\ left,\ thousands\ separators\ and\ two\ decimal\ places) D\ $1,000.00 1/1 \ \ a\ \ \ \ \ 5\ \ \ \ #\ <\-\ commodity\-less\ amount,\ becomes\ $1 \ \ b \f[] .fi .SS Default year .PP You can set a default year to be used for subsequent dates which don\[aq]t specify a year. This is a line beginning with \f[C]Y\f[] followed by the year. Eg: .IP .nf \f[C] Y2009\ \ \ \ \ \ ;\ set\ default\ year\ to\ 2009 12/15\ \ \ \ \ \ ;\ equivalent\ to\ 2009/12/15 \ \ expenses\ \ 1 \ \ assets Y2010\ \ \ \ \ \ ;\ change\ default\ year\ to\ 2010 2009/1/30\ \ ;\ specifies\ the\ year,\ not\ affected \ \ expenses\ \ 1 \ \ assets 1/31\ \ \ \ \ \ \ ;\ equivalent\ to\ 2010/1/31 \ \ expenses\ \ 1 \ \ assets \f[] .fi .SS Including other files .PP You can pull in the content of additional journal files by writing an include directive, like this: .IP .nf \f[C] include\ path/to/file.journal \f[] .fi .PP If the path does not begin with a slash, it is relative to the current file. Glob patterns (\f[C]*\f[]) are not currently supported. .PP The \f[C]include\f[] directive can only be used in journal files. It can include journal, timeclock or timedot files, but not CSV files. .SH EDITOR SUPPORT .PP Add\-on modes exist for various text editors, to make working with journal files easier. They add colour, navigation aids and helpful commands. For hledger users who edit the journal file directly (the majority), using one of these modes is quite recommended. .PP These were written with Ledger in mind, but also work with hledger files: .PP .TS tab(@); lw(16.5n) lw(51.5n). T{ Emacs T}@T{ http://www.ledger\-cli.org/3.0/doc/ledger\-mode.html T} T{ Vim T}@T{ https://github.com/ledger/ledger/wiki/Getting\-started T} T{ Sublime Text T}@T{ https://github.com/ledger/ledger/wiki/Using\-Sublime\-Text T} T{ Textmate T}@T{ https://github.com/ledger/ledger/wiki/Using\-TextMate\-2 T} T{ Text Wrangler \ T}@T{ https://github.com/ledger/ledger/wiki/Editing\-Ledger\-files\-with\-TextWrangler T} .TE .SH "REPORTING BUGS" Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) .SH AUTHORS Simon Michael and contributors .SH COPYRIGHT Copyright (C) 2007-2016 Simon Michael. .br Released under GNU GPL v3 or later. 
.SH SEE ALSO hledger(1), hledger\-ui(1), hledger\-web(1), hledger\-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_timedot(5), ledger(1) http://hledger.org hledger-lib-1.2/doc/hledger_journal.5.info0000644000000000000000000010577013067574766016710 0ustar0000000000000000This is hledger_journal.5.info, produced by makeinfo version 6.0 from stdin.  File: hledger_journal.5.info, Node: Top, Next: FILE FORMAT, Up: (dir) hledger_journal(5) hledger 1.2 ****************************** hledger's usual data source is a plain text file containing journal entries in hledger journal format. This file represents a standard accounting general journal. I use file names ending in '.journal', but that's not required. The journal file contains a number of transaction entries, each describing a transfer of money (or any commodity) between two or more named accounts, in a simple format readable by both hledger and humans. hledger's journal format is a compatible subset, mostly, of ledger's journal format, so hledger can work with compatible ledger journal files as well. It's safe, and encouraged, to run both hledger and ledger on the same journal file, eg to validate the results you're getting. You can use hledger without learning any more about this file; just use the add or web commands to create and update it. Many users, though, also edit the journal file directly with a text editor, perhaps assisted by the helper modes for emacs or vim. Here's an example: ; A sample journal file. This is a comment. 2008/01/01 income ; <- transaction's first line starts in column 0, contains date and description assets:bank:checking $1 ; <- posting lines start with whitespace, each contains an account name income:salary $-1 ; followed by at least two spaces and an amount 2008/06/01 gift assets:bank:checking $1 ; <- at least two postings in a transaction income:gifts $-1 ; <- their amounts must balance to 0 2008/06/02 save assets:bank:saving $1 assets:bank:checking ; <- one amount may be omitted; here $-1 is inferred 2008/06/03 eat & shop ; <- description can be anything expenses:food $1 expenses:supplies $1 ; <- this transaction debits two expense accounts assets:cash ; <- $-2 inferred 2008/12/31 * pay off ; <- an optional * or ! after the date means "cleared" (or anything you want) liabilities:debts $1 assets:bank:checking * Menu: * FILE FORMAT:: * EDITOR SUPPORT::  File: hledger_journal.5.info, Node: FILE FORMAT, Next: EDITOR SUPPORT, Prev: Top, Up: Top 1 FILE FORMAT ************* * Menu: * Transactions:: * Dates:: * Account names:: * Amounts:: * Virtual Postings:: * Balance Assertions:: * Balance Assignments:: * Prices:: * Comments:: * Tags:: * Directives::  File: hledger_journal.5.info, Node: Transactions, Next: Dates, Up: FILE FORMAT 1.1 Transactions ================ Transactions are represented by journal entries. Each begins with a simple date in column 0, followed by three optional fields with spaces between them: * a status flag, which can be empty or '!' or '*' (meaning "uncleared", "pending" and "cleared", or whatever you want) * a transaction code (eg a check number), * and/or a description then some number of postings, of some amount to some account. Each posting is on its own line, consisting of: * indentation of one or more spaces (or tabs) * optionally, a '!' 
or '*' status flag followed by a space * an account name, optionally containing single spaces * optionally, two or more spaces or tabs followed by an amount Usually there are two or more postings, though one or none is also possible. The posting amounts within a transaction must always balance, ie add up to 0. Optionally one amount can be left blank, in which case it will be inferred.  File: hledger_journal.5.info, Node: Dates, Next: Account names, Prev: Transactions, Up: FILE FORMAT 1.2 Dates ========= * Menu: * Simple dates:: * Secondary dates:: * Posting dates::  File: hledger_journal.5.info, Node: Simple dates, Next: Secondary dates, Up: Dates 1.2.1 Simple dates ------------------ Within a journal file, transaction dates use Y/M/D (or Y-M-D or Y.M.D) Leading zeros are optional. The year may be omitted, in which case it will be inferred from the context - the current transaction, the default year set with a default year directive, or the current date when the command is run. Some examples: '2010/01/31', '1/31', '2010-01-31', '2010.1.31'.  File: hledger_journal.5.info, Node: Secondary dates, Next: Posting dates, Prev: Simple dates, Up: Dates 1.2.2 Secondary dates --------------------- Real-life transactions sometimes involve more than one date - eg the date you write a cheque, and the date it clears in your bank. When you want to model this, eg for more accurate balances, you can specify individual posting dates, which I recommend. Or, you can use the secondary dates (aka auxiliary/effective dates) feature, supported for compatibility with Ledger. A secondary date can be written after the primary date, separated by an equals sign. The primary date, on the left, is used by default; the secondary date, on the right, is used when the '--date2' flag is specified ('--aux-date' or '--effective' also work). The meaning of secondary dates is up to you, but it's best to follow a consistent rule. Eg write the bank's clearing date as primary, and when needed, the date the transaction was initiated as secondary. Here's an example. Note that a secondary date will use the year of the primary date if unspecified. 2010/2/23=2/19 movie ticket expenses:cinema $10 assets:checking $ hledger register checking 2010/02/23 movie ticket assets:checking $-10 $-10 $ hledger register checking --date2 2010/02/19 movie ticket assets:checking $-10 $-10 Secondary dates require some effort; you must use them consistently in your journal entries and remember whether to use or not use the '--date2' flag for your reports. They are included in hledger for Ledger compatibility, but posting dates are a more powerful and less confusing alternative.  File: hledger_journal.5.info, Node: Posting dates, Prev: Secondary dates, Up: Dates 1.2.3 Posting dates ------------------- You can give individual postings a different date from their parent transaction, by adding a posting comment containing a tag (see below) like 'date:DATE'. This is probably the best way to control posting dates precisely. Eg in this example the expense should appear in May reports, and the deduction from checking should be reported on 6/1 for easy bank reconciliation: 2015/5/30 expenses:food $10 ; food purchased on saturday 5/30 assets:checking ; bank cleared it on monday, date:6/1 $ hledger -f t.j register food 2015/05/30 expenses:food $10 $10 $ hledger -f t.j register checking 2015/06/01 assets:checking $-10 $-10 DATE should be a simple date; if the year is not specified it will use the year of the transaction's date. 
You can set the secondary date similarly, with 'date2:DATE2'. The 'date:' or 'date2:' tags must have a valid simple date value if they are present, eg a 'date:' tag with no value is not allowed. Ledger's earlier, more compact bracketed date syntax is also supported: '[DATE]', '[DATE=DATE2]' or '[=DATE2]'. hledger will attempt to parse any square-bracketed sequence of the '0123456789/-.=' characters in this way. With this syntax, DATE infers its year from the transaction and DATE2 infers its year from DATE.  File: hledger_journal.5.info, Node: Account names, Next: Amounts, Prev: Dates, Up: FILE FORMAT 1.3 Account names ================= Account names typically have several parts separated by a full colon, from which hledger derives a hierarchical chart of accounts. They can be anything you like, but in finance there are traditionally five top-level accounts: 'assets', 'liabilities', 'income', 'expenses', and 'equity'. Account names may contain single spaces, eg: 'assets:accounts receivable'. Because of this, they must always be followed by *two or more spaces* (or newline). Account names can be aliased.  File: hledger_journal.5.info, Node: Amounts, Next: Virtual Postings, Prev: Account names, Up: FILE FORMAT 1.4 Amounts =========== After the account name, there is usually an amount. Important: between account name and amount, there must be *two or more spaces*. Amounts consist of a number and (usually) a currency symbol or commodity name. Some examples: '2.00001' '$1' '4000 AAPL' '3 "green apples"' '-$1,000,000.00' 'INR 9,99,99,999.00' 'EUR -2.000.000,00' As you can see, the amount format is somewhat flexible: * amounts are a number (the "quantity") and optionally a currency symbol/commodity name (the "commodity"). * the commodity is a symbol, word, or phrase, on the left or right, with or without a separating space. If the commodity contains numbers, spaces or non-word punctuation it must be enclosed in double quotes. * negative amounts with a commodity on the left can have the minus sign before or after it * digit groups (thousands, or any other grouping) can be separated by commas (in which case period is used for decimal point) or periods (in which case comma is used for decimal point) You can use any of these variations when recording data, but when hledger displays amounts, it will choose a consistent format for each commodity. (Except for price amounts, which are always formatted as written). The display format is chosen as follows: * if there is a commodity directive specifying the format, that is used * otherwise the format is inferred from the first posting amount in that commodity in the journal, and the precision (number of decimal places) will be the maximum from all posting amounts in that commmodity * or if there are no such amounts in the journal, a default format is used (like '$1000.00'). Price amounts and amounts in D directives usually don't affect amount format inference, but in some situations they can do so indirectly. (Eg when D's default commodity is applied to a commodity-less amount, or when an amountless posting is balanced using a price's commodity, or when -V is used.) If you find this causing problems, set the desired format with a commodity directive.  
File: hledger_journal.5.info, Node: Virtual Postings, Next: Balance Assertions, Prev: Amounts, Up: FILE FORMAT 1.5 Virtual Postings ==================== When you parenthesise the account name in a posting, we call that a _virtual posting_, which means: * it is ignored when checking that the transaction is balanced * it is excluded from reports when the '--real/-R' flag is used, or the 'real:1' query. You could use this, eg, to set an account's opening balance without needing to use the 'equity:opening balances' account: 1/1 special unbalanced posting to set initial balance (assets:checking) $1000 When the account name is bracketed, we call it a _balanced virtual posting_. This is like an ordinary virtual posting except the balanced virtual postings in a transaction must balance to 0, like the real postings (but separately from them). Balanced virtual postings are also excluded by '--real/-R' or 'real:1'. 1/1 buy food with cash, and update some budget-tracking subaccounts elsewhere expenses:food $10 assets:cash $-10 [assets:checking:available] $10 [assets:checking:budget:food] $-10 Virtual postings have some legitimate uses, but those are few. You can usually find an equivalent journal entry using real postings, which is more correct and provides better error checking.  File: hledger_journal.5.info, Node: Balance Assertions, Next: Balance Assignments, Prev: Virtual Postings, Up: FILE FORMAT 1.6 Balance Assertions ====================== hledger supports Ledger-style balance assertions in journal files. These look like '=EXPECTEDBALANCE' following a posting's amount. Eg in this example we assert the expected dollar balance in accounts a and b after each posting: 2013/1/1 a $1 =$1 b =$-1 2013/1/2 a $1 =$2 b $-1 =$-2 After reading a journal file, hledger will check all balance assertions and report an error if any of them fail. Balance assertions can protect you from, eg, inadvertently disrupting reconciled balances while cleaning up old entries. You can disable them temporarily with the '--ignore-assertions' flag, which can be useful for troubleshooting or for reading Ledger files. * Menu: * Assertions and ordering:: * Assertions and included files:: * Assertions and multiple -f options:: * Assertions and commodities:: * Assertions and subaccounts:: * Assertions and virtual postings::  File: hledger_journal.5.info, Node: Assertions and ordering, Next: Assertions and included files, Up: Balance Assertions 1.6.1 Assertions and ordering ----------------------------- hledger sorts an account's postings and assertions first by date and then (for postings on the same day) by parse order. Note this is different from Ledger, which sorts assertions only by parse order. (Also, Ledger assertions do not see the accumulated effect of repeated postings to the same account within a transaction.) So, hledger balance assertions keep working if you reorder differently-dated transactions within the journal. But if you reorder same-dated transactions or postings, assertions might break and require updating. This order dependence does bring an advantage: precise control over the order of postings and assertions within a day, so you can assert intra-day balances.  File: hledger_journal.5.info, Node: Assertions and included files, Next: Assertions and multiple -f options, Prev: Assertions and ordering, Up: Balance Assertions 1.6.2 Assertions and included files ----------------------------------- With included files, things are a little more complicated. Including preserves the ordering of postings and assertions. 
If you have multiple postings to an account on the same day, split across different files, and you also want to assert the account's balance on the same day, you'll have to put the assertion in the right file.  File: hledger_journal.5.info, Node: Assertions and multiple -f options, Next: Assertions and commodities, Prev: Assertions and included files, Up: Balance Assertions 1.6.3 Assertions and multiple -f options ---------------------------------------- Balance assertions don't work well across files specified with multiple -f options. Use include or concatenate the files instead.  File: hledger_journal.5.info, Node: Assertions and commodities, Next: Assertions and subaccounts, Prev: Assertions and multiple -f options, Up: Balance Assertions 1.6.4 Assertions and commodities -------------------------------- The asserted balance must be a simple single-commodity amount, and in fact the assertion checks only this commodity's balance within the (possibly multi-commodity) account balance. We could call this a partial balance assertion. This is compatible with Ledger, and makes it possible to make assertions about accounts containing multiple commodities. To assert each commodity's balance in such a multi-commodity account, you can add multiple postings (with amount 0 if necessary). But note that no matter how many assertions you add, you can't be sure the account does not contain some unexpected commodity. (We'll add support for this kind of total balance assertion if there's demand.)  File: hledger_journal.5.info, Node: Assertions and subaccounts, Next: Assertions and virtual postings, Prev: Assertions and commodities, Up: Balance Assertions 1.6.5 Assertions and subaccounts -------------------------------- Balance assertions do not count the balance from subaccounts; they check the posted account's exclusive balance. For example: 1/1 checking:fund 1 = 1 ; post to this subaccount, its balance is now 1 checking 1 = 1 ; post to the parent account, its exclusive balance is now 1 equity The balance report's flat mode shows these exclusive balances more clearly: $ hledger bal checking --flat 1 checking 1 checking:fund -------------------- 2  File: hledger_journal.5.info, Node: Assertions and virtual postings, Prev: Assertions and subaccounts, Up: Balance Assertions 1.6.6 Assertions and virtual postings ------------------------------------- Balance assertions are checked against all postings, both real and virtual. They are not affected by the '--real/-R' flag or 'real:' query.  File: hledger_journal.5.info, Node: Balance Assignments, Next: Prices, Prev: Balance Assertions, Up: FILE FORMAT 1.7 Balance Assignments ======================= Ledger-style balance assignments are also supported. These are like balance assertions, but with no posting amount on the left side of the equals sign; instead it is calculated automatically so as to satisfy the assertion. This can be a convenience during data entry, eg when setting opening balances: ; starting a new journal, set asset account balances 2016/1/1 opening balances assets:checking = $409.32 assets:savings = $735.24 assets:cash = $42 equity:opening balances or when adjusting a balance to reality: ; no cash left; update balance, record any untracked spending as a generic expense 2016/1/15 assets:cash = $0 expenses:misc The calculated amount depends on the account's balance in the commodity at that point (which depends on the previously-dated postings of the commodity to that account since the last balance assertion or assignment). 
Note that using balance assignments makes your journal a little less explicit; to know the exact amount posted, you have to run hledger or do the calculations yourself, instead of just reading it.  File: hledger_journal.5.info, Node: Prices, Next: Comments, Prev: Balance Assignments, Up: FILE FORMAT 1.8 Prices ========== * Menu: * Transaction prices:: * Market prices::  File: hledger_journal.5.info, Node: Transaction prices, Next: Market prices, Up: Prices 1.8.1 Transaction prices ------------------------ Within a transaction posting, you can record an amount's price in another commodity. This can be used to document the cost (for a purchase), or selling price (for a sale), or the exchange rate that was used, for this transaction. These transaction prices are fixed, and do not change over time. Amounts with transaction prices can be displayed in the transaction price's commodity, by using the '--cost/-B' flag supported by most hledger commands (mnemonic: "cost Basis"). There are several ways to record a transaction price: 1. Write the unit price (aka exchange rate), as '@ UNITPRICE' after the amount: 2009/1/1 assets:foreign currency €100 @ $1.35 ; one hundred euros at $1.35 each assets:cash 2. Or write the total price, as '@@ TOTALPRICE' after the amount: 2009/1/1 assets:foreign currency €100 @@ $135 ; one hundred euros at $135 for the lot assets:cash 3. Or let hledger infer the price so as to balance the transaction. To permit this, you must fully specify all posting amounts, and their sum must have a non-zero amount in exactly two commodities: 2009/1/1 assets:foreign currency €100 ; one hundred euros assets:cash $-135 ; exchanged for $135 With any of the above examples we get: $ hledger print -B 2009/01/01 assets:foreign currency $135.00 assets:cash $-135.00 Example use for transaction prices: recording the effective conversion rate of purchases made in a foreign currency.  File: hledger_journal.5.info, Node: Market prices, Prev: Transaction prices, Up: Prices 1.8.2 Market prices ------------------- Market prices are not tied to a particular transaction; they represent historical exchange rates between two commodities. (Ledger calls them historical prices.) For example, the prices published by a stock exchange or the foreign exchange market. Some commands (balance, currently) can use this information to show the market value of things at a given date. To record market prices, use P directives in the main journal or in an included file. Their format is: P DATE COMMODITYBEINGPRICED UNITPRICE DATE is a simple date as usual. COMMODITYBEINGPRICED is the symbol of the commodity being priced. UNITPRICE is an ordinary amount (symbol and quantity) in a second commodity, specifying the unit price or conversion rate for the first commodity in terms of the second, on the given date. For example, the following directives say that one euro was worth 1.35 US dollars during 2009, and $1.40 from 2010 onward: P 2009/1/1 € $1.35 P 2010/1/1 € $1.40  File: hledger_journal.5.info, Node: Comments, Next: Tags, Prev: Prices, Up: FILE FORMAT 1.9 Comments ============ Lines in the journal beginning with a semicolon (';') or hash ('#') or asterisk ('*') are comments, and will be ignored. (Asterisk comments make it easy to treat your journal like an org-mode outline in emacs.) Also, anything between 'comment' and 'end comment' directives is a (multi-line) comment. If there is no 'end comment', the comment extends to the end of the file. 
You can attach comments to a transaction by writing them after the description and/or indented on the following lines (before the postings). Similarly, you can attach comments to an individual posting by writing them after the amount and/or indented on the following lines. Some examples: # a journal comment ; also a journal comment comment This is a multiline comment, which continues until a line where the "end comment" string appears on its own. end comment 2012/5/14 something ; a transaction comment ; the transaction comment, continued posting1 1 ; a comment for posting 1 posting2 ; a comment for posting 2 ; another comment line for posting 2 ; a journal comment (because not indented)  File: hledger_journal.5.info, Node: Tags, Next: Directives, Prev: Comments, Up: FILE FORMAT 1.10 Tags ========= Tags are a way to add extra labels or labelled data to postings and transactions, which you can then search or pivot on. A simple tag is a word (which may contain hyphens) followed by a full colon, written inside a transaction or posting comment line: 2017/1/16 bought groceries ; sometag: Tags can have a value, which is the text after the colon, up to the next comma or end of line, with leading/trailing whitespace removed: expenses:food $10 ; a-posting-tag: the tag value Note this means hledger's tag values can not contain commas or newlines. Ending at commas means you can write multiple short tags on one line, comma separated: assets:checking ; a comment containing tag1:, tag2: some value ... Here, * "'a comment containing'" is just comment text, not a tag * "'tag1'" is a tag with no value * "'tag2'" is another tag, whose value is "'some value ...'" Tags in a transaction comment affect the transaction and all of its postings, while tags in a posting comment affect only that posting. For example, the following transaction has three tags ('A', 'TAG2', 'third-tag') and the posting has four (those plus 'posting-tag'): 1/1 a transaction ; A:, TAG2: ; third-tag: a third transaction tag, <- with a value (a) $1 ; posting-tag: Tags are like Ledger's metadata feature, except hledger's tag values are simple strings. * Menu: * Implicit tags::  File: hledger_journal.5.info, Node: Implicit tags, Up: Tags 1.10.1 Implicit tags -------------------- Some predefined "implicit" tags are also provided: * 'code' - the transaction's code field * 'description' - the transaction's description * 'payee' - the part of description before '|', or all of it * 'note' - the part of description after '|', or all of it 'payee' and 'note' support descriptions written in a special 'PAYEE | NOTE' format, accessing the parts before and after the pipe character respectively. For descriptions not containing a pipe character they are the same as 'description'.  File: hledger_journal.5.info, Node: Directives, Prev: Tags, Up: FILE FORMAT 1.11 Directives =============== * Menu: * Account aliases:: * account directive:: * apply account directive:: * Multi-line comments:: * commodity directive:: * Default commodity:: * Default year:: * Including other files::  File: hledger_journal.5.info, Node: Account aliases, Next: account directive, Up: Directives 1.11.1 Account aliases ---------------------- You can define aliases which rewrite your account names (after reading the journal, before generating reports). 
hledger's account aliases can be useful for: * expanding shorthand account names to their full form, allowing easier data entry and a less verbose journal * adapting old journals to your current chart of accounts * experimenting with new account organisations, like a new hierarchy or combining two accounts into one * customising reports See also Cookbook: rewrite account names. * Menu: * Basic aliases:: * Regex aliases:: * Multiple aliases:: * end aliases::  File: hledger_journal.5.info, Node: Basic aliases, Next: Regex aliases, Up: Account aliases 1.11.1.1 Basic aliases ...................... To set an account alias, use the 'alias' directive in your journal file. This affects all subsequent journal entries in the current file or its included files. The spaces around the = are optional: alias OLD = NEW Or, you can use the '--alias 'OLD=NEW'' option on the command line. This affects all entries. It's useful for trying out aliases interactively. OLD and NEW are full account names. hledger will replace any occurrence of the old account name with the new one. Subaccounts are also affected. Eg: alias checking = assets:bank:wells fargo:checking # rewrites "checking" to "assets:bank:wells fargo:checking", or "checking:a" to "assets:bank:wells fargo:checking:a"  File: hledger_journal.5.info, Node: Regex aliases, Next: Multiple aliases, Prev: Basic aliases, Up: Account aliases 1.11.1.2 Regex aliases ...................... There is also a more powerful variant that uses a regular expression, indicated by the forward slashes. (This was the default behaviour in hledger 0.24-0.25): alias /REGEX/ = REPLACEMENT or '--alias '/REGEX/=REPLACEMENT''. REGEX is a case-insensitive regular expression. Anywhere it matches inside an account name, the matched part will be replaced by REPLACEMENT. If REGEX contains parenthesised match groups, these can be referenced by the usual numeric backreferences in REPLACEMENT. Note, currently regular expression aliases may cause noticeable slow-downs. (And if you use Ledger on your hledger file, they will be ignored.) Eg: alias /^(.+):bank:([^:]+)(.*)/ = \1:\2 \3 # rewrites "assets:bank:wells fargo:checking" to "assets:wells fargo checking"  File: hledger_journal.5.info, Node: Multiple aliases, Next: end aliases, Prev: Regex aliases, Up: Account aliases 1.11.1.3 Multiple aliases ......................... You can define as many aliases as you like using directives or command-line options. Aliases are recursive - each alias sees the result of applying previous ones. (This is different from Ledger, where aliases are non-recursive by default). Aliases are applied in the following order: 1. alias directives, most recently seen first (recent directives take precedence over earlier ones; directives not yet seen are ignored) 2. alias options, in the order they appear on the command line  File: hledger_journal.5.info, Node: end aliases, Prev: Multiple aliases, Up: Account aliases 1.11.1.4 end aliases .................... You can clear (forget) all currently defined aliases with the 'end aliases' directive: end aliases  File: hledger_journal.5.info, Node: account directive, Next: apply account directive, Prev: Account aliases, Up: Directives 1.11.2 account directive ------------------------ The 'account' directive predefines account names, as in Ledger and Beancount. This may be useful for your own documentation; hledger doesn't make use of it yet. ; account ACCT ; OPTIONAL COMMENTS/TAGS... account assets:bank:checking a comment acct-no:12345 account expenses:food ; etc.  
File: hledger_journal.5.info, Node: apply account directive, Next: Multi-line comments, Prev: account directive, Up: Directives 1.11.3 apply account directive ------------------------------ You can specify a parent account which will be prepended to all accounts within a section of the journal. Use the 'apply account' and 'end apply account' directives like so: apply account home 2010/1/1 food $10 cash end apply account which is equivalent to: 2010/01/01 home:food $10 home:cash $-10 If 'end apply account' is omitted, the effect lasts to the end of the file. Included files are also affected, eg: apply account business include biz.journal end apply account apply account personal include personal.journal Prior to hledger 1.0, legacy 'account' and 'end' spellings were also supported.  File: hledger_journal.5.info, Node: Multi-line comments, Next: commodity directive, Prev: apply account directive, Up: Directives 1.11.4 Multi-line comments -------------------------- A line containing just 'comment' starts a multi-line comment, and a line containing just 'end comment' ends it. See comments.  File: hledger_journal.5.info, Node: commodity directive, Next: Default commodity, Prev: Multi-line comments, Up: Directives 1.11.5 commodity directive -------------------------- The 'commodity' directive predefines commodities (currently this is just informational), and also it may define the display format for amounts in this commodity (overriding the automatically inferred format). It may be written on a single line, like this: ; commodity EXAMPLEAMOUNT ; display AAAA amounts with the symbol on the right, space-separated, ; using period as decimal point, with four decimal places, and ; separating thousands with comma. commodity 1,000.0000 AAAA or on multiple lines, using the "format" subdirective. In this case the commodity symbol appears twice and should be the same in both places: ; commodity SYMBOL ; format EXAMPLEAMOUNT ; display indian rupees with currency name on the left, ; thousands, lakhs and crores comma-separated, ; period as decimal point, and two decimal places. commodity INR format INR 9,99,99,999.00  File: hledger_journal.5.info, Node: Default commodity, Next: Default year, Prev: commodity directive, Up: Directives 1.11.6 Default commodity ------------------------ The D directive sets a default commodity (and display format), to be used for amounts without a commodity symbol (ie, plain numbers). (Note this differs from Ledger's default commodity directive.) The commodity and display format will be applied to all subsequent commodity-less amounts, or until the next D directive. # commodity-less amounts should be treated as dollars # (and displayed with symbol on the left, thousands separators and two decimal places) D $1,000.00 1/1 a 5 # <- commodity-less amount, becomes $1 b  File: hledger_journal.5.info, Node: Default year, Next: Including other files, Prev: Default commodity, Up: Directives 1.11.7 Default year ------------------- You can set a default year to be used for subsequent dates which don't specify a year. This is a line beginning with 'Y' followed by the year. 
Eg: Y2009 ; set default year to 2009 12/15 ; equivalent to 2009/12/15 expenses 1 assets Y2010 ; change default year to 2010 2009/1/30 ; specifies the year, not affected expenses 1 assets 1/31 ; equivalent to 2010/1/31 expenses 1 assets  File: hledger_journal.5.info, Node: Including other files, Prev: Default year, Up: Directives 1.11.8 Including other files ---------------------------- You can pull in the content of additional journal files by writing an include directive, like this: include path/to/file.journal If the path does not begin with a slash, it is relative to the current file. Glob patterns ('*') are not currently supported. The 'include' directive can only be used in journal files. It can include journal, timeclock or timedot files, but not CSV files.  File: hledger_journal.5.info, Node: EDITOR SUPPORT, Prev: FILE FORMAT, Up: Top 2 EDITOR SUPPORT **************** Add-on modes exist for various text editors, to make working with journal files easier. They add colour, navigation aids and helpful commands. For hledger users who edit the journal file directly (the majority), using one of these modes is quite recommended. These were written with Ledger in mind, but also work with hledger files: Emacs http://www.ledger-cli.org/3.0/doc/ledger-mode.html Vim https://github.com/ledger/ledger/wiki/Getting-started Sublime Text https://github.com/ledger/ledger/wiki/Using-Sublime-Text Textmate https://github.com/ledger/ledger/wiki/Using-TextMate-2 Text Wrangler https://github.com/ledger/ledger/wiki/Editing-Ledger-files-with-TextWrangler  Tag Table: Node: Top78 Node: FILE FORMAT2292 Ref: #file-format2418 Node: Transactions2601 Ref: #transactions2721 Node: Dates3663 Ref: #dates3791 Node: Simple dates3856 Ref: #simple-dates3984 Node: Secondary dates4350 Ref: #secondary-dates4506 Node: Posting dates6069 Ref: #posting-dates6200 Node: Account names7574 Ref: #account-names7713 Node: Amounts8200 Ref: #amounts8338 Node: Virtual Postings10439 Ref: #virtual-postings10600 Node: Balance Assertions11820 Ref: #balance-assertions11997 Node: Assertions and ordering12893 Ref: #assertions-and-ordering13081 Node: Assertions and included files13781 Ref: #assertions-and-included-files14024 Node: Assertions and multiple -f options14357 Ref: #assertions-and-multiple--f-options14613 Node: Assertions and commodities14745 Ref: #assertions-and-commodities14982 Node: Assertions and subaccounts15678 Ref: #assertions-and-subaccounts15912 Node: Assertions and virtual postings16433 Ref: #assertions-and-virtual-postings16642 Node: Balance Assignments16784 Ref: #balance-assignments16953 Node: Prices18072 Ref: #prices18205 Node: Transaction prices18256 Ref: #transaction-prices18401 Node: Market prices19978 Ref: #market-prices20113 Node: Comments21086 Ref: #comments21208 Node: Tags22321 Ref: #tags22441 Node: Implicit tags23870 Ref: #implicit-tags23978 Node: Directives24495 Ref: #directives24610 Node: Account aliases24803 Ref: #account-aliases24949 Node: Basic aliases25553 Ref: #basic-aliases25698 Node: Regex aliases26388 Ref: #regex-aliases26558 Node: Multiple aliases27329 Ref: #multiple-aliases27503 Node: end aliases28001 Ref: #end-aliases28143 Node: account directive28244 Ref: #account-directive28426 Node: apply account directive28722 Ref: #apply-account-directive28920 Node: Multi-line comments29579 Ref: #multi-line-comments29771 Node: commodity directive29899 Ref: #commodity-directive30085 Node: Default commodity30957 Ref: #default-commodity31132 Node: Default year31669 Ref: #default-year31836 Node: Including other 
files32259 Ref: #including-other-files32418 Node: EDITOR SUPPORT32815 Ref: #editor-support32935  End Tag Table hledger-lib-1.2/doc/hledger_journal.5.txt0000644000000000000000000010033613067574771016561 0ustar0000000000000000 hledger_journal(5) hledger User Manuals hledger_journal(5) NAME Journal - hledger's default file format, representing a General Journal DESCRIPTION hledger's usual data source is a plain text file containing journal entries in hledger journal format. This file represents a standard accounting general journal. I use file names ending in .journal, but that's not required. The journal file contains a number of transaction entries, each describing a transfer of money (or any commodity) between two or more named accounts, in a simple format readable by both hledger and humans. hledger's journal format is a compatible subset, mostly, of ledger's journal format, so hledger can work with compatible ledger journal files as well. It's safe, and encouraged, to run both hledger and ledger on the same journal file, eg to validate the results you're get- ting. You can use hledger without learning any more about this file; just use the add or web commands to create and update it. Many users, though, also edit the journal file directly with a text editor, perhaps assisted by the helper modes for emacs or vim. Here's an example: ; A sample journal file. This is a comment. 2008/01/01 income ; <- transaction's first line starts in column 0, contains date and description assets:bank:checking $1 ; <- posting lines start with whitespace, each contains an account name income:salary $-1 ; followed by at least two spaces and an amount 2008/06/01 gift assets:bank:checking $1 ; <- at least two postings in a transaction income:gifts $-1 ; <- their amounts must balance to 0 2008/06/02 save assets:bank:saving $1 assets:bank:checking ; <- one amount may be omitted; here $-1 is inferred 2008/06/03 eat & shop ; <- description can be anything expenses:food $1 expenses:supplies $1 ; <- this transaction debits two expense accounts assets:cash ; <- $-2 inferred 2008/12/31 * pay off ; <- an optional * or ! after the date means "cleared" (or anything you want) liabilities:debts $1 assets:bank:checking FILE FORMAT Transactions Transactions are represented by journal entries. Each begins with a simple date in column 0, followed by three optional fields with spaces between them: o a status flag, which can be empty or ! or * (meaning "uncleared", "pending" and "cleared", or whatever you want) o a transaction code (eg a check number), o and/or a description then some number of postings, of some amount to some account. Each posting is on its own line, consisting of: o indentation of one or more spaces (or tabs) o optionally, a ! or * status flag followed by a space o an account name, optionally containing single spaces o optionally, two or more spaces or tabs followed by an amount Usually there are two or more postings, though one or none is also pos- sible. The posting amounts within a transaction must always balance, ie add up to 0. Optionally one amount can be left blank, in which case it will be inferred. Dates Simple dates Within a journal file, transaction dates use Y/M/D (or Y-M-D or Y.M.D) Leading zeros are optional. The year may be omitted, in which case it will be inferred from the context - the current transaction, the default year set with a default year directive, or the current date when the command is run. Some examples: 2010/01/31, 1/31, 2010-01-31, 2010.1.31. 
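       To make the year inference concrete, here is a small sketch (the dates,
       description, accounts and amounts are invented for illustration; the Y
       directive is described under "Default year" below):

              Y2010

              1/31 january salary        ; parsed as 2010/01/31
                  assets:bank:checking    $1000
                  income:salary          $-1000

       Without a default year directive, a partial date like 1/31 would
       instead take its year from the current date when the command is run.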
Secondary dates Real-life transactions sometimes involve more than one date - eg the date you write a cheque, and the date it clears in your bank. When you want to model this, eg for more accurate balances, you can specify individual posting dates, which I recommend. Or, you can use the sec- ondary dates (aka auxiliary/effective dates) feature, supported for compatibility with Ledger. A secondary date can be written after the primary date, separated by an equals sign. The primary date, on the left, is used by default; the secondary date, on the right, is used when the --date2 flag is speci- fied (--aux-date or --effective also work). The meaning of secondary dates is up to you, but it's best to follow a consistent rule. Eg write the bank's clearing date as primary, and when needed, the date the transaction was initiated as secondary. Here's an example. Note that a secondary date will use the year of the primary date if unspecified. 2010/2/23=2/19 movie ticket expenses:cinema $10 assets:checking $ hledger register checking 2010/02/23 movie ticket assets:checking $-10 $-10 $ hledger register checking --date2 2010/02/19 movie ticket assets:checking $-10 $-10 Secondary dates require some effort; you must use them consistently in your journal entries and remember whether to use or not use the --date2 flag for your reports. They are included in hledger for Ledger compat- ibility, but posting dates are a more powerful and less confusing alternative. Posting dates You can give individual postings a different date from their parent transaction, by adding a posting comment containing a tag (see below) like date:DATE. This is probably the best way to control posting dates precisely. Eg in this example the expense should appear in May reports, and the deduction from checking should be reported on 6/1 for easy bank reconciliation: 2015/5/30 expenses:food $10 ; food purchased on saturday 5/30 assets:checking ; bank cleared it on monday, date:6/1 $ hledger -f t.j register food 2015/05/30 expenses:food $10 $10 $ hledger -f t.j register checking 2015/06/01 assets:checking $-10 $-10 DATE should be a simple date; if the year is not specified it will use the year of the transaction's date. You can set the secondary date similarly, with date2:DATE2. The date: or date2: tags must have a valid simple date value if they are present, eg a date: tag with no value is not allowed. Ledger's earlier, more compact bracketed date syntax is also supported: [DATE], [DATE=DATE2] or [=DATE2]. hledger will attempt to parse any square-bracketed sequence of the 0123456789/-.= characters in this way. With this syntax, DATE infers its year from the transaction and DATE2 infers its year from DATE. Account names Account names typically have several parts separated by a full colon, from which hledger derives a hierarchical chart of accounts. They can be anything you like, but in finance there are traditionally five top-level accounts: assets, liabilities, income, expenses, and equity. Account names may contain single spaces, eg: assets:accounts receiv- able. Because of this, they must always be followed by two or more spaces (or newline). Account names can be aliased. Amounts After the account name, there is usually an amount. Important: between account name and amount, there must be two or more spaces. Amounts consist of a number and (usually) a currency symbol or commod- ity name. 
Some examples: 2.00001 $1 4000 AAPL 3 "green apples" -$1,000,000.00 INR 9,99,99,999.00 EUR -2.000.000,00 As you can see, the amount format is somewhat flexible: o amounts are a number (the "quantity") and optionally a currency sym- bol/commodity name (the "commodity"). o the commodity is a symbol, word, or phrase, on the left or right, with or without a separating space. If the commodity contains num- bers, spaces or non-word punctuation it must be enclosed in double quotes. o negative amounts with a commodity on the left can have the minus sign before or after it o digit groups (thousands, or any other grouping) can be separated by commas (in which case period is used for decimal point) or periods (in which case comma is used for decimal point) You can use any of these variations when recording data, but when hledger displays amounts, it will choose a consistent format for each commodity. (Except for price amounts, which are always formatted as written). The display format is chosen as follows: o if there is a commodity directive specifying the format, that is used o otherwise the format is inferred from the first posting amount in that commodity in the journal, and the precision (number of decimal places) will be the maximum from all posting amounts in that commmod- ity o or if there are no such amounts in the journal, a default format is used (like $1000.00). Price amounts and amounts in D directives usually don't affect amount format inference, but in some situations they can do so indirectly. (Eg when D's default commodity is applied to a commodity-less amount, or when an amountless posting is balanced using a price's commodity, or when -V is used.) If you find this causing problems, set the desired format with a commodity directive. Virtual Postings When you parenthesise the account name in a posting, we call that a virtual posting, which means: o it is ignored when checking that the transaction is balanced o it is excluded from reports when the --real/-R flag is used, or the real:1 query. You could use this, eg, to set an account's opening balance without needing to use the equity:opening balances account: 1/1 special unbalanced posting to set initial balance (assets:checking) $1000 When the account name is bracketed, we call it a balanced virtual post- ing. This is like an ordinary virtual posting except the balanced vir- tual postings in a transaction must balance to 0, like the real post- ings (but separately from them). Balanced virtual postings are also excluded by --real/-R or real:1. 1/1 buy food with cash, and update some budget-tracking subaccounts elsewhere expenses:food $10 assets:cash $-10 [assets:checking:available] $10 [assets:checking:budget:food] $-10 Virtual postings have some legitimate uses, but those are few. You can usually find an equivalent journal entry using real postings, which is more correct and provides better error checking. Balance Assertions hledger supports Ledger-style balance assertions in journal files. These look like =EXPECTEDBALANCE following a posting's amount. Eg in this example we assert the expected dollar balance in accounts a and b after each posting: 2013/1/1 a $1 =$1 b =$-1 2013/1/2 a $1 =$2 b $-1 =$-2 After reading a journal file, hledger will check all balance assertions and report an error if any of them fail. Balance assertions can pro- tect you from, eg, inadvertently disrupting reconciled balances while cleaning up old entries. 
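       For instance, if the first entry above were later edited to post $2 to
       account a (with its own assertions updated to match), the next day's
       =$2 assertion would no longer hold, since a's balance would then be $3,
       and hledger would report the failing assertion when reading the file.
       A sketch of such an edit (illustrative only; the exact error output is
       not shown here and may vary by version):

              2013/1/1
                a   $2  =$2    ; edited from $1, assertion updated to match
                b        =$-2
              2013/1/2
                a   $1  =$2    ; a's balance is now $3, so this assertion fails
                b  $-1  =$-2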
You can disable them temporarily with the --ignore-assertions flag, which can be useful for troubleshooting or for reading Ledger files. Assertions and ordering hledger sorts an account's postings and assertions first by date and then (for postings on the same day) by parse order. Note this is dif- ferent from Ledger, which sorts assertions only by parse order. (Also, Ledger assertions do not see the accumulated effect of repeated post- ings to the same account within a transaction.) So, hledger balance assertions keep working if you reorder differ- ently-dated transactions within the journal. But if you reorder same-dated transactions or postings, assertions might break and require updating. This order dependence does bring an advantage: precise con- trol over the order of postings and assertions within a day, so you can assert intra-day balances. Assertions and included files With included files, things are a little more complicated. Including preserves the ordering of postings and assertions. If you have multi- ple postings to an account on the same day, split across different files, and you also want to assert the account's balance on the same day, you'll have to put the assertion in the right file. Assertions and multiple -f options Balance assertions don't work well across files specified with multiple -f options. Use include or concatenate the files instead. Assertions and commodities The asserted balance must be a simple single-commodity amount, and in fact the assertion checks only this commodity's balance within the (possibly multi-commodity) account balance. We could call this a par- tial balance assertion. This is compatible with Ledger, and makes it possible to make assertions about accounts containing multiple commodi- ties. To assert each commodity's balance in such a multi-commodity account, you can add multiple postings (with amount 0 if necessary). But note that no matter how many assertions you add, you can't be sure the account does not contain some unexpected commodity. (We'll add support for this kind of total balance assertion if there's demand.) Assertions and subaccounts Balance assertions do not count the balance from subaccounts; they check the posted account's exclusive balance. For example: 1/1 checking:fund 1 = 1 ; post to this subaccount, its balance is now 1 checking 1 = 1 ; post to the parent account, its exclusive balance is now 1 equity The balance report's flat mode shows these exclusive balances more clearly: $ hledger bal checking --flat 1 checking 1 checking:fund -------------------- 2 Assertions and virtual postings Balance assertions are checked against all postings, both real and vir- tual. They are not affected by the --real/-R flag or real: query. Balance Assignments Ledger-style balance assignments are also supported. These are like balance assertions, but with no posting amount on the left side of the equals sign; instead it is calculated automatically so as to satisfy the assertion. 
This can be a convenience during data entry, eg when setting opening balances: ; starting a new journal, set asset account balances 2016/1/1 opening balances assets:checking = $409.32 assets:savings = $735.24 assets:cash = $42 equity:opening balances or when adjusting a balance to reality: ; no cash left; update balance, record any untracked spending as a generic expense 2016/1/15 assets:cash = $0 expenses:misc The calculated amount depends on the account's balance in the commodity at that point (which depends on the previously-dated postings of the commodity to that account since the last balance assertion or assign- ment). Note that using balance assignments makes your journal a little less explicit; to know the exact amount posted, you have to run hledger or do the calculations yourself, instead of just reading it. Prices Transaction prices Within a transaction posting, you can record an amount's price in another commodity. This can be used to document the cost (for a pur- chase), or selling price (for a sale), or the exchange rate that was used, for this transaction. These transaction prices are fixed, and do not change over time. Amounts with transaction prices can be displayed in the transaction price's commodity, by using the --cost/-B flag supported by most hledger commands (mnemonic: "cost Basis"). There are several ways to record a transaction price: 1. Write the unit price (aka exchange rate), as @ UNITPRICE after the amount: 2009/1/1 assets:foreign currency 100 @ $1.35 ; one hundred euros at $1.35 each assets:cash 2. Or write the total price, as @@ TOTALPRICE after the amount: 2009/1/1 assets:foreign currency 100 @@ $135 ; one hundred euros at $135 for the lot assets:cash 3. Or let hledger infer the price so as to balance the transaction. To permit this, you must fully specify all posting amounts, and their sum must have a non-zero amount in exactly two commodities: 2009/1/1 assets:foreign currency 100 ; one hundred euros assets:cash $-135 ; exchanged for $135 With any of the above examples we get: $ hledger print -B 2009/01/01 assets:foreign currency $135.00 assets:cash $-135.00 Example use for transaction prices: recording the effective conversion rate of purchases made in a foreign currency. Market prices Market prices are not tied to a particular transaction; they represent historical exchange rates between two commodities. (Ledger calls them historical prices.) For example, the prices published by a stock exchange or the foreign exchange market. Some commands (balance, cur- rently) can use this information to show the market value of things at a given date. To record market prices, use P directives in the main journal or in an included file. Their format is: P DATE COMMODITYBEINGPRICED UNITPRICE DATE is a simple date as usual. COMMODITYBEINGPRICED is the symbol of the commodity being priced. UNITPRICE is an ordinary amount (symbol and quantity) in a second commodity, specifying the unit price or con- version rate for the first commodity in terms of the second, on the given date. For example, the following directives say that one euro was worth 1.35 US dollars during 2009, and $1.40 from 2010 onward: P 2009/1/1 $1.35 P 2010/1/1 $1.40 Comments Lines in the journal beginning with a semicolon (;) or hash (#) or asterisk (*) are comments, and will be ignored. (Asterisk comments make it easy to treat your journal like an org-mode outline in emacs.) Also, anything between comment and end comment directives is a (multi-line) comment. 
If there is no end comment, the comment extends to the end of the file. You can attach comments to a transaction by writing them after the description and/or indented on the following lines (before the post- ings). Similarly, you can attach comments to an individual posting by writing them after the amount and/or indented on the following lines. Some examples: # a journal comment ; also a journal comment comment This is a multiline comment, which continues until a line where the "end comment" string appears on its own. end comment 2012/5/14 something ; a transaction comment ; the transaction comment, continued posting1 1 ; a comment for posting 1 posting2 ; a comment for posting 2 ; another comment line for posting 2 ; a journal comment (because not indented) Tags Tags are a way to add extra labels or labelled data to postings and transactions, which you can then search or pivot on. A simple tag is a word (which may contain hyphens) followed by a full colon, written inside a transaction or posting comment line: 2017/1/16 bought groceries ; sometag: Tags can have a value, which is the text after the colon, up to the next comma or end of line, with leading/trailing whitespace removed: expenses:food $10 ; a-posting-tag: the tag value Note this means hledger's tag values can not contain commas or new- lines. Ending at commas means you can write multiple short tags on one line, comma separated: assets:checking ; a comment containing tag1:, tag2: some value ... Here, o "a comment containing" is just comment text, not a tag o "tag1" is a tag with no value o "tag2" is another tag, whose value is "some value ..." Tags in a transaction comment affect the transaction and all of its postings, while tags in a posting comment affect only that posting. For example, the following transaction has three tags (A, TAG2, third-tag) and the posting has four (those plus posting-tag): 1/1 a transaction ; A:, TAG2: ; third-tag: a third transaction tag, <- with a value (a) $1 ; posting-tag: Tags are like Ledger's metadata feature, except hledger's tag values are simple strings. Implicit tags Some predefined "implicit" tags are also provided: o code - the transaction's code field o description - the transaction's description o payee - the part of description before |, or all of it o note - the part of description after |, or all of it payee and note support descriptions written in a special PAYEE | NOTE format, accessing the parts before and after the pipe character respec- tively. For descriptions not containing a pipe character they are the same as description. Directives Account aliases You can define aliases which rewrite your account names (after reading the journal, before generating reports). hledger's account aliases can be useful for: o expanding shorthand account names to their full form, allowing easier data entry and a less verbose journal o adapting old journals to your current chart of accounts o experimenting with new account organisations, like a new hierarchy or combining two accounts into one o customising reports See also Cookbook: rewrite account names. Basic aliases To set an account alias, use the alias directive in your journal file. This affects all subsequent journal entries in the current file or its included files. The spaces around the = are optional: alias OLD = NEW Or, you can use the --alias 'OLD=NEW' option on the command line. This affects all entries. It's useful for trying out aliases interactively. OLD and NEW are full account names. 
hledger will replace any occur- rence of the old account name with the new one. Subaccounts are also affected. Eg: alias checking = assets:bank:wells fargo:checking # rewrites "checking" to "assets:bank:wells fargo:checking", or "checking:a" to "assets:bank:wells fargo:checking:a" Regex aliases There is also a more powerful variant that uses a regular expression, indicated by the forward slashes. (This was the default behaviour in hledger 0.24-0.25): alias /REGEX/ = REPLACEMENT or --alias '/REGEX/=REPLACEMENT'. REGEX is a case-insensitive regular expression. Anywhere it matches inside an account name, the matched part will be replaced by REPLACE- MENT. If REGEX contains parenthesised match groups, these can be ref- erenced by the usual numeric backreferences in REPLACEMENT. Note, cur- rently regular expression aliases may cause noticeable slow-downs. (And if you use Ledger on your hledger file, they will be ignored.) Eg: alias /^(.+):bank:([^:]+)(.*)/ = \1:\2 \3 # rewrites "assets:bank:wells fargo:checking" to "assets:wells fargo checking" Multiple aliases You can define as many aliases as you like using directives or com- mand-line options. Aliases are recursive - each alias sees the result of applying previous ones. (This is different from Ledger, where aliases are non-recursive by default). Aliases are applied in the fol- lowing order: 1. alias directives, most recently seen first (recent directives take precedence over earlier ones; directives not yet seen are ignored) 2. alias options, in the order they appear on the command line end aliases You can clear (forget) all currently defined aliases with the end aliases directive: end aliases account directive The account directive predefines account names, as in Ledger and Bean- count. This may be useful for your own documentation; hledger doesn't make use of it yet. ; account ACCT ; OPTIONAL COMMENTS/TAGS... account assets:bank:checking a comment acct-no:12345 account expenses:food ; etc. apply account directive You can specify a parent account which will be prepended to all accounts within a section of the journal. Use the apply account and end apply account directives like so: apply account home 2010/1/1 food $10 cash end apply account which is equivalent to: 2010/01/01 home:food $10 home:cash $-10 If end apply account is omitted, the effect lasts to the end of the file. Included files are also affected, eg: apply account business include biz.journal end apply account apply account personal include personal.journal Prior to hledger 1.0, legacy account and end spellings were also sup- ported. Multi-line comments A line containing just comment starts a multi-line comment, and a line containing just end comment ends it. See comments. commodity directive The commodity directive predefines commodities (currently this is just informational), and also it may define the display format for amounts in this commodity (overriding the automatically inferred format). It may be written on a single line, like this: ; commodity EXAMPLEAMOUNT ; display AAAA amounts with the symbol on the right, space-separated, ; using period as decimal point, with four decimal places, and ; separating thousands with comma. commodity 1,000.0000 AAAA or on multiple lines, using the "format" subdirective. 
In this case the commodity symbol appears twice and should be the same in both places: ; commodity SYMBOL ; format EXAMPLEAMOUNT ; display indian rupees with currency name on the left, ; thousands, lakhs and crores comma-separated, ; period as decimal point, and two decimal places. commodity INR format INR 9,99,99,999.00 Default commodity The D directive sets a default commodity (and display format), to be used for amounts without a commodity symbol (ie, plain numbers). (Note this differs from Ledger's default commodity directive.) The commodity and display format will be applied to all subsequent commodity-less amounts, or until the next D directive. # commodity-less amounts should be treated as dollars # (and displayed with symbol on the left, thousands separators and two decimal places) D $1,000.00 1/1 a 5 # <- commodity-less amount, becomes $1 b Default year You can set a default year to be used for subsequent dates which don't specify a year. This is a line beginning with Y followed by the year. Eg: Y2009 ; set default year to 2009 12/15 ; equivalent to 2009/12/15 expenses 1 assets Y2010 ; change default year to 2010 2009/1/30 ; specifies the year, not affected expenses 1 assets 1/31 ; equivalent to 2010/1/31 expenses 1 assets Including other files You can pull in the content of additional journal files by writing an include directive, like this: include path/to/file.journal If the path does not begin with a slash, it is relative to the current file. Glob patterns (*) are not currently supported. The include directive can only be used in journal files. It can include journal, timeclock or timedot files, but not CSV files. EDITOR SUPPORT Add-on modes exist for various text editors, to make working with jour- nal files easier. They add colour, navigation aids and helpful com- mands. For hledger users who edit the journal file directly (the majority), using one of these modes is quite recommended. These were written with Ledger in mind, but also work with hledger files: Emacs http://www.ledger-cli.org/3.0/doc/ledger-mode.html Vim https://github.com/ledger/ledger/wiki/Get- ting-started Sublime Text https://github.com/ledger/ledger/wiki/Using-Sub- lime-Text Textmate https://github.com/ledger/ledger/wiki/Using-Text- Mate-2 Text Wrangler https://github.com/ledger/ledger/wiki/Edit- ing-Ledger-files-with-TextWrangler REPORTING BUGS Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) AUTHORS Simon Michael and contributors COPYRIGHT Copyright (C) 2007-2016 Simon Michael. Released under GNU GPL v3 or later. SEE ALSO hledger(1), hledger-ui(1), hledger-web(1), hledger-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_time- dot(5), ledger(1) http://hledger.org hledger 1.2 March 2017 hledger_journal(5) hledger-lib-1.2/doc/hledger_timeclock.50000644000000000000000000000557613067574770016254 0ustar0000000000000000 .TH "hledger_timeclock" "5" "March 2017" "hledger 1.2" "hledger User Manuals" .SH NAME .PP Timeclock \- the time logging format of timeclock.el, as read by hledger .SH DESCRIPTION .PP hledger can read timeclock files. As with Ledger, these are (a subset of) timeclock.el\[aq]s format, containing clock\-in and clock\-out entries as in the example below. The date is a simple date. The time format is HH:MM[:SS][+\-ZZZZ]. Seconds and timezone are optional. The timezone, if present, must be four digits and is ignored (currently the time is always interpreted as a local time). 
.IP .nf \f[C] i\ 2015/03/30\ 09:00:00\ some:account\ name\ \ optional\ description\ after\ two\ spaces o\ 2015/03/30\ 09:20:00 i\ 2015/03/31\ 22:21:45\ another\ account o\ 2015/04/01\ 02:00:34 \f[] .fi .PP hledger treats each clock\-in/clock\-out pair as a transaction posting some number of hours to an account. Or if the session spans more than one day, it is split into several transactions, one for each day. For the above time log, \f[C]hledger\ print\f[] generates these journal entries: .IP .nf \f[C] $\ hledger\ \-f\ t.timeclock\ print 2015/03/30\ *\ optional\ description\ after\ two\ spaces \ \ \ \ (some:account\ name)\ \ \ \ \ \ \ \ \ 0.33h 2015/03/31\ *\ 22:21\-23:59 \ \ \ \ (another\ account)\ \ \ \ \ \ \ \ \ 1.64h 2015/04/01\ *\ 00:00\-02:00 \ \ \ \ (another\ account)\ \ \ \ \ \ \ \ \ 2.01h \f[] .fi .PP Here is a sample.timeclock to download and some queries to try: .IP .nf \f[C] $\ hledger\ \-f\ sample.timeclock\ balance\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ #\ current\ time\ balances $\ hledger\ \-f\ sample.timeclock\ register\ \-p\ 2009/3\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ #\ sessions\ in\ march\ 2009 $\ hledger\ \-f\ sample.timeclock\ register\ \-p\ weekly\ \-\-depth\ 1\ \-\-empty\ \ #\ time\ summary\ by\ week \f[] .fi .PP To generate time logs, ie to clock in and clock out, you could: .IP \[bu] 2 use emacs and the built\-in timeclock.el, or the extended timeclock\-x.el and perhaps the extras in ledgerutils.el .IP \[bu] 2 at the command line, use these bash aliases: .RS 2 .IP .nf \f[C] alias\ ti="echo\ i\ `date\ \[aq]+%Y\-%m\-%d\ %H:%M:%S\[aq]`\ \\$*\ >>$TIMELOG" alias\ to="echo\ o\ `date\ \[aq]+%Y\-%m\-%d\ %H:%M:%S\[aq]`\ >>$TIMELOG" \f[] .fi .RE .IP \[bu] 2 or use the old \f[C]ti\f[] and \f[C]to\f[] scripts in the ledger 2.x repository. These rely on a "timeclock" executable which I think is just the ledger 2 executable renamed. .SH "REPORTING BUGS" Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) .SH AUTHORS Simon Michael and contributors .SH COPYRIGHT Copyright (C) 2007-2016 Simon Michael. .br Released under GNU GPL v3 or later. .SH SEE ALSO hledger(1), hledger\-ui(1), hledger\-web(1), hledger\-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_timedot(5), ledger(1) http://hledger.org hledger-lib-1.2/doc/hledger_timeclock.5.info0000644000000000000000000000426013067574765017177 0ustar0000000000000000This is hledger_timeclock.5.info, produced by makeinfo version 6.0 from stdin.  File: hledger_timeclock.5.info, Node: Top, Up: (dir) hledger_timeclock(5) hledger 1.2 ******************************** hledger can read timeclock files. As with Ledger, these are (a subset of) timeclock.el's format, containing clock-in and clock-out entries as in the example below. The date is a simple date. The time format is HH:MM[:SS][+-ZZZZ]. Seconds and timezone are optional. The timezone, if present, must be four digits and is ignored (currently the time is always interpreted as a local time). i 2015/03/30 09:00:00 some:account name optional description after two spaces o 2015/03/30 09:20:00 i 2015/03/31 22:21:45 another account o 2015/04/01 02:00:34 hledger treats each clock-in/clock-out pair as a transaction posting some number of hours to an account. Or if the session spans more than one day, it is split into several transactions, one for each day. 
For the above time log, 'hledger print' generates these journal entries: $ hledger -f t.timeclock print 2015/03/30 * optional description after two spaces (some:account name) 0.33h 2015/03/31 * 22:21-23:59 (another account) 1.64h 2015/04/01 * 00:00-02:00 (another account) 2.01h Here is a sample.timeclock to download and some queries to try: $ hledger -f sample.timeclock balance # current time balances $ hledger -f sample.timeclock register -p 2009/3 # sessions in march 2009 $ hledger -f sample.timeclock register -p weekly --depth 1 --empty # time summary by week To generate time logs, ie to clock in and clock out, you could: * use emacs and the built-in timeclock.el, or the extended timeclock-x.el and perhaps the extras in ledgerutils.el * at the command line, use these bash aliases: alias ti="echo i `date '+%Y-%m-%d %H:%M:%S'` \$* >>$TIMELOG" alias to="echo o `date '+%Y-%m-%d %H:%M:%S'` >>$TIMELOG" * or use the old 'ti' and 'to' scripts in the ledger 2.x repository. These rely on a "timeclock" executable which I think is just the ledger 2 executable renamed.  Tag Table: Node: Top80  End Tag Table hledger-lib-1.2/doc/hledger_timeclock.5.txt0000644000000000000000000000601713067574770017061 0ustar0000000000000000 hledger_timeclock(5) hledger User Manuals hledger_timeclock(5) NAME Timeclock - the time logging format of timeclock.el, as read by hledger DESCRIPTION hledger can read timeclock files. As with Ledger, these are (a subset of) timeclock.el's format, containing clock-in and clock-out entries as in the example below. The date is a simple date. The time format is HH:MM[:SS][+-ZZZZ]. Seconds and timezone are optional. The timezone, if present, must be four digits and is ignored (currently the time is always interpreted as a local time). i 2015/03/30 09:00:00 some:account name optional description after two spaces o 2015/03/30 09:20:00 i 2015/03/31 22:21:45 another account o 2015/04/01 02:00:34 hledger treats each clock-in/clock-out pair as a transaction posting some number of hours to an account. Or if the session spans more than one day, it is split into several transactions, one for each day. For the above time log, hledger print generates these journal entries: $ hledger -f t.timeclock print 2015/03/30 * optional description after two spaces (some:account name) 0.33h 2015/03/31 * 22:21-23:59 (another account) 1.64h 2015/04/01 * 00:00-02:00 (another account) 2.01h Here is a sample.timeclock to download and some queries to try: $ hledger -f sample.timeclock balance # current time balances $ hledger -f sample.timeclock register -p 2009/3 # sessions in march 2009 $ hledger -f sample.timeclock register -p weekly --depth 1 --empty # time summary by week To generate time logs, ie to clock in and clock out, you could: o use emacs and the built-in timeclock.el, or the extended time- clock-x.el and perhaps the extras in ledgerutils.el o at the command line, use these bash aliases: alias ti="echo i `date '+%Y-%m-%d %H:%M:%S'` \$* >>$TIMELOG" alias to="echo o `date '+%Y-%m-%d %H:%M:%S'` >>$TIMELOG" o or use the old ti and to scripts in the ledger 2.x repository. These rely on a "timeclock" executable which I think is just the ledger 2 executable renamed. REPORTING BUGS Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) AUTHORS Simon Michael and contributors COPYRIGHT Copyright (C) 2007-2016 Simon Michael. Released under GNU GPL v3 or later. 
SEE ALSO hledger(1), hledger-ui(1), hledger-web(1), hledger-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_time- dot(5), ledger(1) http://hledger.org hledger 1.2 March 2017 hledger_timeclock(5) hledger-lib-1.2/doc/hledger_timedot.50000644000000000000000000001035513067574770015736 0ustar0000000000000000 .TH "hledger_timedot" "5" "March 2017" "hledger 1.2" "hledger User Manuals" .SH NAME .PP Timedot \- hledger\[aq]s human\-friendly time logging format .SH DESCRIPTION .PP Timedot is a plain text format for logging dated, categorised quantities (eg time), supported by hledger. It is convenient for approximate and retroactive time logging, eg when the real\-time clock\-in/out required with a timeclock file is too precise or too interruptive. It can be formatted like a bar chart, making clear at a glance where time was spent. .PP Though called "timedot", the format does not specify the commodity being logged, so could represent other dated, quantifiable things. Eg you could record a single\-entry journal of financial transactions, perhaps slightly more conveniently than with hledger_journal(5) format. .SH FILE FORMAT .PP A timedot file contains a series of day entries. A day entry begins with a date, and is followed by category/quantity pairs, one per line. Dates are hledger\-style simple dates (see hledger_journal(5)). Categories are hledger\-style account names, optionally indented. There must be at least two spaces between the category and the quantity. Quantities can be written in two ways: .IP "1." 3 a series of dots (period characters). Each dot represents "a quarter" \- eg, a quarter hour. Spaces can be used to group dots into hours, for easier counting. .IP "2." 3 a number (integer or decimal), representing "units" \- eg, hours. A good alternative when dots are cumbersome. (A number also can record negative quantities.) .PP Blank lines and lines beginning with #, ; or * are ignored. An example: .IP .nf \f[C] #\ on\ this\ day,\ 6h\ was\ spent\ on\ client\ work,\ 1.5h\ on\ haskell\ FOSS\ work,\ etc. 2016/2/1 inc:client1\ \ \ ....\ ....\ ....\ ....\ ....\ .... fos:haskell\ \ \ ....\ ..\ biz:research\ \ . 2016/2/2 inc:client1\ \ \ ....\ .... biz:research\ \ . 
\f[] .fi .PP Or with numbers: .IP .nf \f[C] 2016/2/3 inc:client1\ \ \ 4 fos:hledger\ \ \ 3 biz:research\ \ 1 \f[] .fi .PP Reporting: .IP .nf \f[C] $\ hledger\ \-f\ t.timedot\ print\ date:2016/2/2 2016/02/02\ * \ \ \ \ (inc:client1)\ \ \ \ \ \ \ \ \ \ 2.00 2016/02/02\ * \ \ \ \ (biz:research)\ \ \ \ \ \ \ \ \ \ 0.25 \f[] .fi .IP .nf \f[C] $\ hledger\ \-f\ t.timedot\ bal\ \-\-daily\ \-\-tree Balance\ changes\ in\ 2016/02/01\-2016/02/03: \ \ \ \ \ \ \ \ \ \ \ \ ||\ \ 2016/02/01d\ \ 2016/02/02d\ \ 2016/02/03d\ ============++======================================== \ biz\ \ \ \ \ \ \ \ ||\ \ \ \ \ \ \ \ \ 0.25\ \ \ \ \ \ \ \ \ 0.25\ \ \ \ \ \ \ \ \ 1.00\ \ \ \ research\ ||\ \ \ \ \ \ \ \ \ 0.25\ \ \ \ \ \ \ \ \ 0.25\ \ \ \ \ \ \ \ \ 1.00\ \ fos\ \ \ \ \ \ \ \ ||\ \ \ \ \ \ \ \ \ 1.50\ \ \ \ \ \ \ \ \ \ \ \ 0\ \ \ \ \ \ \ \ \ 3.00\ \ \ \ haskell\ \ ||\ \ \ \ \ \ \ \ \ 1.50\ \ \ \ \ \ \ \ \ \ \ \ 0\ \ \ \ \ \ \ \ \ \ \ \ 0\ \ \ \ hledger\ \ ||\ \ \ \ \ \ \ \ \ \ \ \ 0\ \ \ \ \ \ \ \ \ \ \ \ 0\ \ \ \ \ \ \ \ \ 3.00\ \ inc\ \ \ \ \ \ \ \ ||\ \ \ \ \ \ \ \ \ 6.00\ \ \ \ \ \ \ \ \ 2.00\ \ \ \ \ \ \ \ \ 4.00\ \ \ \ client1\ \ ||\ \ \ \ \ \ \ \ \ 6.00\ \ \ \ \ \ \ \ \ 2.00\ \ \ \ \ \ \ \ \ 4.00\ \-\-\-\-\-\-\-\-\-\-\-\-++\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- \ \ \ \ \ \ \ \ \ \ \ \ ||\ \ \ \ \ \ \ \ \ 7.75\ \ \ \ \ \ \ \ \ 2.25\ \ \ \ \ \ \ \ \ 8.00\ \f[] .fi .PP I prefer to use period for separating account components. We can make this work with an account alias: .IP .nf \f[C] 2016/2/4 fos.hledger.timedot\ \ 4 fos.ledger\ \ \ \ \ \ \ \ \ \ \ .. \f[] .fi .IP .nf \f[C] $\ hledger\ \-f\ t.timedot\ \-\-alias\ /\\\\./=:\ bal\ date:2016/2/4 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 4.50\ \ fos \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 4.00\ \ \ \ hledger:timedot \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 0.50\ \ \ \ ledger \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 4.50 \f[] .fi .PP Here is a sample.timedot. .SH "REPORTING BUGS" Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) .SH AUTHORS Simon Michael and contributors .SH COPYRIGHT Copyright (C) 2007-2016 Simon Michael. .br Released under GNU GPL v3 or later. .SH SEE ALSO hledger(1), hledger\-ui(1), hledger\-web(1), hledger\-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_timedot(5), ledger(1) http://hledger.org hledger-lib-1.2/doc/hledger_timedot.5.info0000644000000000000000000000651413067574764016675 0ustar0000000000000000This is hledger_timedot.5.info, produced by makeinfo version 6.0 from stdin.  File: hledger_timedot.5.info, Node: Top, Next: FILE FORMAT, Up: (dir) hledger_timedot(5) hledger 1.2 ****************************** Timedot is a plain text format for logging dated, categorised quantities (eg time), supported by hledger. It is convenient for approximate and retroactive time logging, eg when the real-time clock-in/out required with a timeclock file is too precise or too interruptive. It can be formatted like a bar chart, making clear at a glance where time was spent. Though called "timedot", the format does not specify the commodity being logged, so could represent other dated, quantifiable things. Eg you could record a single-entry journal of financial transactions, perhaps slightly more conveniently than with hledger_journal(5) format. * Menu: * FILE FORMAT::  File: hledger_timedot.5.info, Node: FILE FORMAT, Prev: Top, Up: Top 1 FILE FORMAT ************* A timedot file contains a series of day entries. 
A day entry begins with a date, and is followed by category/quantity pairs, one per line. Dates are hledger-style simple dates (see hledger_journal(5)). Categories are hledger-style account names, optionally indented. There must be at least two spaces between the category and the quantity. Quantities can be written in two ways: 1. a series of dots (period characters). Each dot represents "a quarter" - eg, a quarter hour. Spaces can be used to group dots into hours, for easier counting. 2. a number (integer or decimal), representing "units" - eg, hours. A good alternative when dots are cumbersome. (A number also can record negative quantities.) Blank lines and lines beginning with #, ; or * are ignored. An example: # on this day, 6h was spent on client work, 1.5h on haskell FOSS work, etc. 2016/2/1 inc:client1 .... .... .... .... .... .... fos:haskell .... .. biz:research . 2016/2/2 inc:client1 .... .... biz:research . Or with numbers: 2016/2/3 inc:client1 4 fos:hledger 3 biz:research 1 Reporting: $ hledger -f t.timedot print date:2016/2/2 2016/02/02 * (inc:client1) 2.00 2016/02/02 * (biz:research) 0.25 $ hledger -f t.timedot bal --daily --tree Balance changes in 2016/02/01-2016/02/03: || 2016/02/01d 2016/02/02d 2016/02/03d ============++======================================== biz || 0.25 0.25 1.00 research || 0.25 0.25 1.00 fos || 1.50 0 3.00 haskell || 1.50 0 0 hledger || 0 0 3.00 inc || 6.00 2.00 4.00 client1 || 6.00 2.00 4.00 ------------++---------------------------------------- || 7.75 2.25 8.00 I prefer to use period for separating account components. We can make this work with an account alias: 2016/2/4 fos.hledger.timedot 4 fos.ledger .. $ hledger -f t.timedot --alias /\\./=: bal date:2016/2/4 4.50 fos 4.00 hledger:timedot 0.50 ledger -------------------- 4.50 Here is a sample.timedot.  Tag Table: Node: Top78 Node: FILE FORMAT882 Ref: #file-format985  End Tag Table hledger-lib-1.2/doc/hledger_timedot.5.txt0000644000000000000000000001065013067574770016552 0ustar0000000000000000 hledger_timedot(5) hledger User Manuals hledger_timedot(5) NAME Timedot - hledger's human-friendly time logging format DESCRIPTION Timedot is a plain text format for logging dated, categorised quanti- ties (eg time), supported by hledger. It is convenient for approximate and retroactive time logging, eg when the real-time clock-in/out required with a timeclock file is too precise or too interruptive. It can be formatted like a bar chart, making clear at a glance where time was spent. Though called "timedot", the format does not specify the commodity being logged, so could represent other dated, quantifiable things. Eg you could record a single-entry journal of financial transactions, per- haps slightly more conveniently than with hledger_journal(5) format. FILE FORMAT A timedot file contains a series of day entries. A day entry begins with a date, and is followed by category/quantity pairs, one per line. Dates are hledger-style simple dates (see hledger_journal(5)). Cate- gories are hledger-style account names, optionally indented. There must be at least two spaces between the category and the quantity. Quantities can be written in two ways: 1. a series of dots (period characters). Each dot represents "a quar- ter" - eg, a quarter hour. Spaces can be used to group dots into hours, for easier counting. 2. a number (integer or decimal), representing "units" - eg, hours. A good alternative when dots are cumbersome. (A number also can record negative quantities.) 
Blank lines and lines beginning with #, ; or * are ignored. An exam- ple: # on this day, 6h was spent on client work, 1.5h on haskell FOSS work, etc. 2016/2/1 inc:client1 .... .... .... .... .... .... fos:haskell .... .. biz:research . 2016/2/2 inc:client1 .... .... biz:research . Or with numbers: 2016/2/3 inc:client1 4 fos:hledger 3 biz:research 1 Reporting: $ hledger -f t.timedot print date:2016/2/2 2016/02/02 * (inc:client1) 2.00 2016/02/02 * (biz:research) 0.25 $ hledger -f t.timedot bal --daily --tree Balance changes in 2016/02/01-2016/02/03: || 2016/02/01d 2016/02/02d 2016/02/03d ============++======================================== biz || 0.25 0.25 1.00 research || 0.25 0.25 1.00 fos || 1.50 0 3.00 haskell || 1.50 0 0 hledger || 0 0 3.00 inc || 6.00 2.00 4.00 client1 || 6.00 2.00 4.00 ------------++---------------------------------------- || 7.75 2.25 8.00 I prefer to use period for separating account components. We can make this work with an account alias: 2016/2/4 fos.hledger.timedot 4 fos.ledger .. $ hledger -f t.timedot --alias /\\./=: bal date:2016/2/4 4.50 fos 4.00 hledger:timedot 0.50 ledger -------------------- 4.50 Here is a sample.timedot. REPORTING BUGS Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel or hledger mail list) AUTHORS Simon Michael and contributors COPYRIGHT Copyright (C) 2007-2016 Simon Michael. Released under GNU GPL v3 or later. SEE ALSO hledger(1), hledger-ui(1), hledger-web(1), hledger-api(1), hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_time- dot(5), ledger(1) http://hledger.org hledger 1.2 March 2017 hledger_timedot(5) hledger-lib-1.2/LICENSE0000644000000000000000000010451313035210046012723 0ustar0000000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. 
Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. 
"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . hledger-lib-1.2/Setup.hs0000644000000000000000000000005613035210046013347 0ustar0000000000000000import Distribution.Simple main = defaultMain hledger-lib-1.2/hledger-lib.cabal0000644000000000000000000002052213067575305015076 0ustar0000000000000000-- This file has been generated from package.yaml by hpack version 0.17.0. -- -- see: https://github.com/sol/hpack name: hledger-lib version: 1.2 synopsis: Core data types, parsers and functionality for the hledger accounting tools description: This is a reusable library containing hledger's core functionality. . hledger is a cross-platform program for tracking money, time, or any other commodity, using double-entry accounting and a simple, editable file format. It is inspired by and largely compatible with ledger(1). hledger provides command-line, curses and web interfaces, and aims to be a reliable, practical tool for daily use. 
category: Finance stability: stable homepage: http://hledger.org bug-reports: http://bugs.hledger.org author: Simon Michael maintainer: Simon Michael license: GPL-3 license-file: LICENSE tested-with: GHC==7.10.3, GHC==8.0 build-type: Simple cabal-version: >= 1.10 extra-source-files: CHANGES README data-files: doc/hledger_csv.5 doc/hledger_csv.5.info doc/hledger_csv.5.txt doc/hledger_journal.5 doc/hledger_journal.5.info doc/hledger_journal.5.txt doc/hledger_timeclock.5 doc/hledger_timeclock.5.info doc/hledger_timeclock.5.txt doc/hledger_timedot.5 doc/hledger_timedot.5.info doc/hledger_timedot.5.txt source-repository head type: git location: https://github.com/simonmichael/hledger flag oldtime description: If building with time < 1.5, also depend on old-locale. Set automatically by cabal. manual: False default: False library hs-source-dirs: ./. ghc-options: -Wall -fno-warn-unused-do-bind -fno-warn-name-shadowing -fno-warn-missing-signatures -fno-warn-type-defaults -fno-warn-orphans build-depends: base >=4.8 && <5 , base-compat >=0.8.1 , array , blaze-markup >=0.5.1 , bytestring , cmdargs >=0.10 && <0.11 , containers , csv , data-default >=0.5 , Decimal , deepseq , directory , filepath , hashtables >= 1.2 , megaparsec >=5.0 && < 5.3 , mtl , mtl-compat , old-time , pretty-show >=1.6.4 , regex-tdfa , safe >=0.2 , split >=0.1 && <0.3 , text >=1.2 && <1.3 , transformers >=0.2 && <0.6 , uglymemo , utf8-string >=0.3.5 && <1.1 , HUnit , parsec , semigroups if impl(ghc <7.6) build-depends: ghc-prim if flag(oldtime) build-depends: time <1.5 , old-locale else build-depends: time >=1.5 exposed-modules: Hledger Hledger.Data Hledger.Data.Account Hledger.Data.AccountName Hledger.Data.Amount Hledger.Data.Commodity Hledger.Data.Dates Hledger.Data.Journal Hledger.Data.Ledger Hledger.Data.MarketPrice Hledger.Data.Period Hledger.Data.StringFormat Hledger.Data.Posting Hledger.Data.RawOptions Hledger.Data.Timeclock Hledger.Data.Transaction Hledger.Data.AutoTransaction Hledger.Data.Types Hledger.Query Hledger.Read Hledger.Read.Common Hledger.Read.CsvReader Hledger.Read.JournalReader Hledger.Read.TimedotReader Hledger.Read.TimeclockReader Hledger.Reports Hledger.Reports.ReportOptions Hledger.Reports.BalanceHistoryReport Hledger.Reports.BalanceReport Hledger.Reports.EntriesReport Hledger.Reports.MultiBalanceReports Hledger.Reports.PostingsReport Hledger.Reports.TransactionsReports Hledger.Utils Hledger.Utils.Debug Hledger.Utils.Parse Hledger.Utils.Regex Hledger.Utils.String Hledger.Utils.Test Hledger.Utils.Text Hledger.Utils.Tree Hledger.Utils.UTF8IOCompat other-modules: Paths_hledger_lib default-language: Haskell2010 test-suite doctests type: exitcode-stdio-1.0 main-is: doctests.hs hs-source-dirs: ./. 
tests ghc-options: -Wall -fno-warn-unused-do-bind -fno-warn-name-shadowing -fno-warn-missing-signatures -fno-warn-type-defaults -fno-warn-orphans build-depends: base >=4.8 && <5 , base-compat >=0.8.1 , array , blaze-markup >=0.5.1 , bytestring , cmdargs >=0.10 && <0.11 , containers , csv , data-default >=0.5 , Decimal , deepseq , directory , filepath , hashtables >= 1.2 , megaparsec >=5.0 && < 5.3 , mtl , mtl-compat , old-time , pretty-show >=1.6.4 , regex-tdfa , safe >=0.2 , split >=0.1 && <0.3 , text >=1.2 && <1.3 , transformers >=0.2 && <0.6 , uglymemo , utf8-string >=0.3.5 && <1.1 , HUnit , doctest >=0.8 , Glob >=0.7 if impl(ghc <7.6) build-depends: ghc-prim other-modules: Hledger Hledger.Data Hledger.Data.Account Hledger.Data.AccountName Hledger.Data.Amount Hledger.Data.AutoTransaction Hledger.Data.Commodity Hledger.Data.Dates Hledger.Data.Journal Hledger.Data.Ledger Hledger.Data.MarketPrice Hledger.Data.Period Hledger.Data.Posting Hledger.Data.RawOptions Hledger.Data.StringFormat Hledger.Data.Timeclock Hledger.Data.Transaction Hledger.Data.Types Hledger.Query Hledger.Read Hledger.Read.Common Hledger.Read.CsvReader Hledger.Read.JournalReader Hledger.Read.TimeclockReader Hledger.Read.TimedotReader Hledger.Reports Hledger.Reports.BalanceHistoryReport Hledger.Reports.BalanceReport Hledger.Reports.EntriesReport Hledger.Reports.MultiBalanceReports Hledger.Reports.PostingsReport Hledger.Reports.ReportOptions Hledger.Reports.TransactionsReports Hledger.Utils Hledger.Utils.Debug Hledger.Utils.Parse Hledger.Utils.Regex Hledger.Utils.String Hledger.Utils.Test Hledger.Utils.Text Hledger.Utils.Tree Hledger.Utils.UTF8IOCompat default-language: Haskell2010 test-suite hunittests type: exitcode-stdio-1.0 main-is: hunittests.hs hs-source-dirs: ./. tests ghc-options: -Wall -fno-warn-unused-do-bind -fno-warn-name-shadowing -fno-warn-missing-signatures -fno-warn-type-defaults -fno-warn-orphans build-depends: base >=4.8 && <5 , base-compat >=0.8.1 , array , blaze-markup >=0.5.1 , bytestring , cmdargs >=0.10 && <0.11 , containers , csv , data-default >=0.5 , Decimal , deepseq , directory , filepath , hashtables >= 1.2 , megaparsec >=5.0 && < 5.3 , mtl , mtl-compat , old-time , pretty-show >=1.6.4 , regex-tdfa , safe >=0.2 , split >=0.1 && <0.3 , text >=1.2 && <1.3 , transformers >=0.2 && <0.6 , uglymemo , utf8-string >=0.3.5 && <1.1 , HUnit , hledger-lib , parsec , test-framework , test-framework-hunit if impl(ghc <7.6) build-depends: ghc-prim if flag(oldtime) build-depends: time <1.5 , old-locale else build-depends: time >=1.5 other-modules: Hledger Hledger.Data Hledger.Data.Account Hledger.Data.AccountName Hledger.Data.Amount Hledger.Data.AutoTransaction Hledger.Data.Commodity Hledger.Data.Dates Hledger.Data.Journal Hledger.Data.Ledger Hledger.Data.MarketPrice Hledger.Data.Period Hledger.Data.Posting Hledger.Data.RawOptions Hledger.Data.StringFormat Hledger.Data.Timeclock Hledger.Data.Transaction Hledger.Data.Types Hledger.Query Hledger.Read Hledger.Read.Common Hledger.Read.CsvReader Hledger.Read.JournalReader Hledger.Read.TimeclockReader Hledger.Read.TimedotReader Hledger.Reports Hledger.Reports.BalanceHistoryReport Hledger.Reports.BalanceReport Hledger.Reports.EntriesReport Hledger.Reports.MultiBalanceReports Hledger.Reports.PostingsReport Hledger.Reports.ReportOptions Hledger.Reports.TransactionsReports Hledger.Utils Hledger.Utils.Debug Hledger.Utils.Parse Hledger.Utils.Regex Hledger.Utils.String Hledger.Utils.Test Hledger.Utils.Text Hledger.Utils.Tree Hledger.Utils.UTF8IOCompat default-language: 
Haskell2010 hledger-lib-1.2/CHANGES0000644000000000000000000002537513067576425012736 0ustar0000000000000000API-ish changes in the hledger-lib package. See also the hledger and project change logs (for user-visible changes).

# 1.2 (2017/3/31)

## journal format

A pipe character can optionally be used to delimit payee names in transaction descriptions, for more accurate querying and pivoting by payee. Eg, for a description like `payee name | additional notes`, the two parts will be accessible as pseudo-fields/tags named `payee` and `note` (see the first example below).

Some journal parse errors now show the range of lines involved, not just the first.

## ledger format

The experimental `ledger:` reader based on the WIP ledger4 project has been disabled, reducing build dependencies.

## Misc

Fix a bug when tying the knot between postings and their parent transaction, reducing memory usage by about 10% (#483) (Mykola Orliuk)

Fix a few spaceleaks (#413) (Moritz Kiefer)

Add Ledger.Parse.Text to package.yaml, fixing a potential build failure.

Allow megaparsec 5.2 (#503)

Rename optserror -> usageError, consolidate with other error functions

# 1.1 (2016/12/31)

## journal format

- balance assignments are now supported (#438, #129, #157, #288); see the second example below.
  This feature also brings a slight performance drop (~5%); optimisations welcome.
- also recognise `*.hledger` files as hledger journal format

## ledger format

- use ledger-parse from the ledger4 project as an alternate reader for C++ Ledger journals
  The idea is that some day we might get better compatibility with Ledger files this way. Right now this reader is not very useful and will be used only if you explicitly select it with a `ledger:` prefix. It parses transaction dates, descriptions, accounts and amounts, and ignores everything else. Amount parsing is delegated to hledger's journal parser, and malformed amounts might be silently ignored. This adds at least some of the following as new dependencies for hledger-lib: parsers, parsec, attoparsec, trifecta.

## misc

- update base lower bound to enforce GHC 7.10+
  hledger-lib had a valid install plan with GHC 7.8, but currently requires GHC 7.10 to compile. Now we require base 4.8+ everywhere to ensure the right GHC version at the start.
- Hledger.Read api cleanups
- rename dbgIO to dbg0IO, consistent with dbg0, and document a bug in dbg*IO
- make readJournalFiles [f] equivalent to readJournalFile f (#437)
- more general parser types enabling reuse outside of IO (#439)

# 1.0.1 (2016/10/27)

- allow megaparsec 5.0 or 5.1

# 1.0 (2016/10/26)

## timedot format

- new "timedot" format for retroactive/approximate time logging.
  Timedot is a plain text format for logging dated, categorised quantities (eg time), supported by hledger. It is convenient for approximate and retroactive time logging, eg when the real-time clock-in/out required with a timeclock file is too precise or too interruptive. It can be formatted like a bar chart, making clear at a glance where time was spent. (See the third example below.)

## timeclock format

- renamed "timelog" format to "timeclock", matching the emacs package
- sessions can no longer span file boundaries (unclocked-out sessions will be auto-closed at the end of the file).
- transaction ids now count up rather than down (#394)
- timeclock files no longer support default year directives
- removed old code for appending timeclock transactions to journal transactions. A holdover from the days when both were allowed in one file.
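
For illustration, a journal entry using the 1.2 pipe syntax might look like this (the payee, note, accounts and amount here are invented, not taken from hledger's docs):

    2017/3/15 Acme Corp | invoice 42, March consulting
        assets:receivable        $500.00
        revenues:consulting

"Acme Corp" should then be available as the pseudo-tag `payee` and "invoice 42, March consulting" as `note`, so a tag query (eg `tag:payee=Acme`) or pivoting on payee can work with the payee part alone.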
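
The 1.1 balance assignments can be sketched with an invented opening-balances entry: instead of giving a posting amount, you state the balance the account should end up with, and hledger derives the amount.

    2016/1/1 opening balances
        assets:checking          = $1,000.00
        equity:opening balances

Here the first posting sets assets:checking to exactly $1,000.00, and the equity posting balances the transaction as usual.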
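
The timedot format described under 1.0 can be sketched roughly as follows (accounts invented): a date line, then unindented lines of account name, two or more spaces, and a quantity, where a plain number is taken as-is and each dot counts as 0.25.

    2016/2/1
    fos:hledger    4
    fos:ledger     ..
    biz:research   1

Read with the timedot reader this records 4, 0.5 and 1 units against those accounts on 2016/2/1, and aligning the dots gives the bar-chart-like view described above.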
## csv format

- fix empty field assignment parsing, rule parse errors after megaparsec port (#407) (Hans-Peter Deifel)

## journal format

- journal files can now include timeclock or timedot files (#320) (but not yet CSV files).
- fixed an issue with ordering of same-date transactions included from other files
- the "commodity" directive and "format" subdirective are now supported, allowing full control of commodity style (#295)
  The commodity directive's format subdirective can now be used to override the inferred style for a commodity, eg to increase or decrease the precision. This is at least a good workaround for #295.
- Ledger-style "apply account"/"end apply account" directives are now used to set a default parent account.
- the Ledger-style "account" directive is now accepted (and ignored).
- bracketed posting dates are more robust (#304)
  Bracketed posting dates were fragile; they worked only if you wrote full 10-character dates. Also some semantics were a bit unclear. Now they should be robust, and have been documented more clearly. This is a legacy undocumented Ledger syntax, but it improves compatibility and might be preferable to the more verbose "date:" tags if you write posting dates often (as I do). Internally, bracketed posting dates are no longer considered to be tags. Journal comment, tag, and posting date parsers have been reworked, all with doctests.
- balance assertion failure messages are clearer
- with --debug=2, more detail about balance assertions is shown.

## misc

- file parsers have been ported from Parsec to Megaparsec \o/ (#289, #366) (Alexey Shmalko, Moritz Kiefer)
- most hledger types have been converted from String to Text, reducing memory usage by 30%+ on large files
- file parsers have been simplified for easier troubleshooting (#275).
  The journal/timeclock/timedot parsers, instead of constructing opaque journal update functions which are later applied to build the journal, now construct the journal directly by modifying the parser state. This is easier to understand and debug. It also rules out the possibility of journal updates being a space leak. (They weren't, in fact this change increased memory usage slightly, but that has been addressed in other ways). The ParsedJournal type alias has been added to distinguish "being-parsed" journals and "finalised" journals.
- file format detection is more robust.
  The Journal, Timelog and Timedot readers' detectors now check each line in the sample data, not just the first one. I think the sample data is only about 30 chars right now, but even so this fixed a format detection issue I was seeing. Also, we now always try parsing stdin as journal format (not just sometimes).
- all file formats now produce transaction ids, not just journal (#394)
- git clone of the hledger repo on windows now works (#345)
- added missing benchmark file (#342)
- our stack.yaml files are more compatible across stack versions (#300)
- use newer file-embed to fix ghci working directory dependence ()
- report more accurate dates in account transaction report when postings have their own dates (affects hledger-ui and hledger-web registers).
  The newly-named "transaction register date" is the date to be displayed for that transaction in a transaction register, for some current account and filter query. It is either the transaction date from the journal ("transaction general date"), or if postings to the current account and matched by the register's filter query have their own dates, the earliest of those posting dates.
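
The 1.0 journal format additions above (the commodity directive with its format subdirective, apply account ... end apply account, and bracketed posting dates) can be combined in one rough sketch; the accounts, amounts and dates are invented:

    ; display $ amounts with comma-separated thousands and two decimal places
    commodity $
      format $1,000.00

    ; prefix the accounts below with "personal:" until "end apply account"
    apply account personal

    2016/3/1 client payment
        ; the bracketed date below gives that posting its own date
        assets:bank:checking     $2,000.00  ; [2016/3/4]
        income:consulting

    end apply account

With this, $ amounts should display in the declared style, the accounts become personal:assets:bank:checking and personal:income:consulting, and register reports can show the bank posting under 2016/3/4 rather than the transaction date.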
- simplify account transactions report's running total.
  The account transactions report used for hledger-ui and -web registers now gives either the "period total" or "historical total", depending strictly on the --historical flag. It doesn't try to indicate whether the historical total is the accurate historical balance (which depends on the user's report query).
- reloading a file now preserves the effect of options, query arguments etc.
- reloading a journal should now reload all included files as well.
- the Hledger.Read.\* modules have been reorganised for better reuse.
  Hledger.Read.Utils has been renamed Hledger.Read.Common and holds low-level parsers & utilities; high-level read utilities are now in Hledger.Read.
- clarify amount display style canonicalisation code and terminology a bit.
  Individual amounts still have styles; from these we derive the standard "commodity styles". In user docs, we might call these "commodity formats" since they can be controlled by the "format" subdirective in journal files.
- Journal is now a monoid
- expandPath now throws a proper IO error
- more unit tests, start using doctest

0.27 (2015/10/30)

- The main hledger types now derive NFData, which makes it easier to time things with criterion.
- Utils has been split up more.
- Utils.Regex: regular expression compilation has been memoized, and memoizing versions of regexReplace[CI] have been added, since compiling regular expressions every time seems to be quite expensive (#244).
- Utils.String: strWidth is now aware of multi-line strings (#242).
- Read: parsers now use a consistent p suffix.
- New dependencies: deepseq, uglymemo.
- All the hledger packages' cabal files are now generated from simpler, less redundant yaml files by hpack, in principle. In practice, manual fixups are still needed until hpack gets better, but it's still a win.

0.26 (2015/7/12)

- allow year parser to handle arbitrarily large years
- Journal's Show instance reported one too many accounts
- some cleanup of debug trace helpers
- tighten up some date and account name parsers (don't accept leading spaces; haddocks)
- drop regexpr dependency

0.25.1 (2015/4/29)

- support/require base-compat >0.8 (#245)

0.25 (2015/4/7)

- GHC 7.10 compatibility (#239)

0.24.1 (2015/3/15)

- fix JournalReader "ctx" compilation warning
- add some type signatures in Utils to help make ghci-web

0.24 (2014/12/25)

- fix combineJournalUpdates folding order
- fix a regexReplaceCI bug
- fix a splitAtElement bug with adjacent separators
- mostly replace slow regexpr with regex-tdfa (fixes #189)
- use the modern Text.Parsec API
- allow transformers 0.4*
- regexReplace now supports backreferences
- Transactions now remember their parse location in the journal file
- export Regexp types, disambiguate CsvReader's similarly-named type
- export failIfInvalidMonth/Day (fixes #216)
- track the commodity of zero amounts when possible (useful eg for hledger-web's multi-commodity charts)
- show posting dates in debug output
- more debug helpers

0.23.3 (2014/9/12)

- allow transformers 0.4*

0.23.2 (2014/5/8)

- postingsReport: also fix date sorting of displayed postings (#184)

0.23.1 (2014/5/7)

- postingsReport: with disordered journal entries, postings before the report start date could get wrongly included. (#184)
0.23 (2014/5/1)

- orDatesFrom -> spanDefaultsFrom

0.22.2 (2014/4/16)

- display years before 1000 with four digits, not three
- avoid pretty-show to build with GHC < 7.4
- allow text 1.1, drop data-pprint to build with GHC 7.8.x

0.22.1 (2014/1/6) and older: see http://hledger.org/release-notes or doc/CHANGES.md.
hledger-lib-1.2/README0000644000000000000000000000025113035210046012570 0ustar0000000000000000A reusable library containing hledger's core functionality. This is used by most hledger* packages for common data parsing, command line option handling, reporting etc.