hledger-lib-1.30/Hledger.hs

{-# LANGUAGE OverloadedStrings #-}

module Hledger (
  module X
 ,tests_Hledger
)
where

import Hledger.Data as X
import Hledger.Read as X
import Hledger.Reports as X
import Hledger.Query as X
import Hledger.Utils as X

tests_Hledger = testGroup "Hledger" [
   tests_Data
  ,tests_Query
  ,tests_Read
  ,tests_Reports
  ,tests_Utils
  ]

hledger-lib-1.30/Hledger/Data.hs

{-# LANGUAGE OverloadedStrings #-}
{-|

The Hledger.Data library allows parsing and querying of C++ ledger-style
journal files.  It generally provides a compatible subset of C++ ledger's
functionality.  This package re-exports all the Hledger.Data.* modules
(except UTF8, which requires an explicit import.)
-} module Hledger.Data ( module Hledger.Data.Account, module Hledger.Data.AccountName, module Hledger.Data.Amount, module Hledger.Data.Balancing, module Hledger.Data.Dates, module Hledger.Data.Errors, module Hledger.Data.Journal, module Hledger.Data.JournalChecks, module Hledger.Data.Json, module Hledger.Data.Ledger, module Hledger.Data.Period, module Hledger.Data.PeriodicTransaction, module Hledger.Data.Posting, module Hledger.Data.RawOptions, module Hledger.Data.StringFormat, module Hledger.Data.Timeclock, module Hledger.Data.Transaction, module Hledger.Data.TransactionModifier, module Hledger.Data.Types, module Hledger.Data.Valuation, tests_Data ) where import Test.Tasty (testGroup) import Hledger.Data.Account import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Balancing import Hledger.Data.Dates import Hledger.Data.Errors import Hledger.Data.Journal import Hledger.Data.JournalChecks import Hledger.Data.Json import Hledger.Data.Ledger import Hledger.Data.Period import Hledger.Data.PeriodicTransaction import Hledger.Data.Posting import Hledger.Data.RawOptions import Hledger.Data.StringFormat import Hledger.Data.Timeclock import Hledger.Data.Transaction import Hledger.Data.TransactionModifier import Hledger.Data.Types hiding (MixedAmountKey, Mixed) import Hledger.Data.Valuation tests_Data = testGroup "Data" [ tests_AccountName ,tests_Amount ,tests_Dates ,tests_Balancing ,tests_Journal ,tests_Ledger ,tests_Posting ,tests_Valuation ,tests_StringFormat ,tests_Timeclock ,tests_Transaction ] hledger-lib-1.30/Hledger/Data/Account.hs0000644000000000000000000002647614434445206016201 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-| An 'Account' has a name, a list of subaccounts, an optional parent account, and subaccounting-excluding and -including balances. -} module Hledger.Data.Account ( nullacct , accountsFromPostings , accountTree , showAccounts , showAccountsBoringFlag , printAccounts , lookupAccount , parentAccounts , accountsLevels , mapAccounts , anyAccounts , filterAccounts , sumAccounts , clipAccounts , clipAccountsAndAggregate , pruneAccounts , flattenAccounts , accountSetDeclarationInfo , sortAccountNamesByDeclaration , sortAccountTreeByAmount ) where import qualified Data.HashSet as HS import qualified Data.HashMap.Strict as HM import Data.List (find, foldl', sortOn) import Data.List.Extra (groupOn) import qualified Data.Map as M import Data.Ord (Down(..)) import Safe (headMay) import Text.Printf (printf) import Hledger.Data.AccountName (expandAccountName, clipOrEllipsifyAccountName) import Hledger.Data.Amount import Hledger.Data.Types -- deriving instance Show Account instance Show Account where show Account{..} = printf "Account %s (boring:%s, postings:%d, ebalance:%s, ibalance:%s)" aname (if aboring then "y" else "n" :: String) anumpostings (wbUnpack $ showMixedAmountB noColour aebalance) (wbUnpack $ showMixedAmountB noColour aibalance) instance Eq Account where (==) a b = aname a == aname b -- quick equality test for speed -- and -- [ aname a == aname b -- -- , aparent a == aparent b -- avoid infinite recursion -- , asubs a == asubs b -- , aebalance a == aebalance b -- , aibalance a == aibalance b -- ] nullacct = Account { aname = "" , adeclarationinfo = Nothing , asubs = [] , aparent = Nothing , aboring = False , anumpostings = 0 , aebalance = nullmixedamt , aibalance = nullmixedamt } -- | Derive 1. an account tree and 2. 
each account's total exclusive -- and inclusive changes from a list of postings. -- This is the core of the balance command (and of *ledger). -- The accounts are returned as a list in flattened tree order, -- and also reference each other as a tree. -- (The first account is the root of the tree.) accountsFromPostings :: [Posting] -> [Account] accountsFromPostings ps = let summed = foldr (\p -> HM.insertWith addAndIncrement (paccount p) (1, pamount p)) mempty ps where addAndIncrement (n, a) (m, b) = (n + m, a `maPlus` b) acctstree = accountTree "root" $ HM.keys summed acctswithebals = mapAccounts setnumpsebalance acctstree where setnumpsebalance a = a{anumpostings=numps, aebalance=total} where (numps, total) = HM.lookupDefault (0, nullmixedamt) (aname a) summed acctswithibals = sumAccounts acctswithebals acctswithparents = tieAccountParents acctswithibals acctsflattened = flattenAccounts acctswithparents in acctsflattened -- | Convert a list of account names to a tree of Account objects, -- with just the account names filled in. -- A single root account with the given name is added. accountTree :: AccountName -> [AccountName] -> Account accountTree rootname as = nullacct{aname=rootname, asubs=map (uncurry accountTree') $ M.assocs m } where T m = treeFromPaths $ map expandAccountName as :: FastTree AccountName accountTree' a (T m') = nullacct{ aname=a ,asubs=map (uncurry accountTree') $ M.assocs m' } -- | An efficient-to-build tree suggested by Cale Gibbard, probably -- better than accountNameTreeFrom. newtype FastTree a = T (M.Map a (FastTree a)) deriving (Show, Eq, Ord) mergeTrees :: (Ord a) => FastTree a -> FastTree a -> FastTree a mergeTrees (T m) (T m') = T (M.unionWith mergeTrees m m') treeFromPath :: [a] -> FastTree a treeFromPath [] = T M.empty treeFromPath (x:xs) = T (M.singleton x (treeFromPath xs)) treeFromPaths :: (Ord a) => [[a]] -> FastTree a treeFromPaths = foldl' mergeTrees (T M.empty) . map treeFromPath -- | Tie the knot so all subaccounts' parents are set correctly. tieAccountParents :: Account -> Account tieAccountParents = tie Nothing where tie parent a@Account{..} = a' where a' = a{aparent=parent, asubs=map (tie (Just a')) asubs} -- | Get this account's parent accounts, from the nearest up to the root. parentAccounts :: Account -> [Account] parentAccounts Account{aparent=Nothing} = [] parentAccounts Account{aparent=Just a} = a:parentAccounts a -- | List the accounts at each level of the account tree. accountsLevels :: Account -> [[Account]] accountsLevels = takeWhile (not . null) . iterate (concatMap asubs) . (:[]) -- | Map a (non-tree-structure-modifying) function over this and sub accounts. mapAccounts :: (Account -> Account) -> Account -> Account mapAccounts f a = f a{asubs = map (mapAccounts f) $ asubs a} -- | Is the predicate true on any of this account or its subaccounts ? anyAccounts :: (Account -> Bool) -> Account -> Bool anyAccounts p a | p a = True | otherwise = any (anyAccounts p) $ asubs a -- | Add subaccount-inclusive balances to an account tree. sumAccounts :: Account -> Account sumAccounts a | null $ asubs a = a{aibalance=aebalance a} | otherwise = a{aibalance=ibal, asubs=subs} where subs = map sumAccounts $ asubs a ibal = maSum $ aebalance a : map aibalance subs -- | Remove all subaccounts below a certain depth. 
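--
-- An illustrative sketch (not from the library; it uses 'accountTree' and
-- 'flattenAccounts' from this module):
--
-- > flattenAccounts $ clipAccounts 1 $ accountTree "root" ["a:b:c","d"]
--
-- keeps only the root and its direct children @a@ and @d@, discarding
-- @a:b@ and @a:b:c@.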
clipAccounts :: Int -> Account -> Account clipAccounts 0 a = a{asubs=[]} clipAccounts d a = a{asubs=subs} where subs = map (clipAccounts (d-1)) $ asubs a -- | Remove subaccounts below the specified depth, aggregating their balance at the depth limit -- (accounts at the depth limit will have any sub-balances merged into their exclusive balance). -- If the depth is Nothing, return the original accounts clipAccountsAndAggregate :: Maybe Int -> [Account] -> [Account] clipAccountsAndAggregate Nothing as = as clipAccountsAndAggregate (Just d) as = combined where clipped = [a{aname=clipOrEllipsifyAccountName (Just d) $ aname a} | a <- as] combined = [a{aebalance=maSum $ map aebalance same} | same@(a:_) <- groupOn aname clipped] {- test cases, assuming d=1: assets:cash 1 1 assets:checking 1 1 -> as: [assets:cash 1 1, assets:checking 1 1] clipped: [assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:cash 1 1 assets:checking 1 1 -> as: [assets 0 2, assets:cash 1 1, assets:checking 1 1] clipped: [assets 0 2, assets 1 1, assets 1 1] combined: [assets 2 2] assets 0 2 assets:bank 1 2 assets:bank:checking 1 1 -> as: [assets 0 2, assets:bank 1 2, assets:bank:checking 1 1] clipped: [assets 0 2, assets 1 2, assets 1 1] combined: [assets 2 2] -} -- | Remove all leaf accounts and subtrees matching a predicate. pruneAccounts :: (Account -> Bool) -> Account -> Maybe Account pruneAccounts p = headMay . prune where prune a | null prunedsubs = if p a then [] else [a'] | otherwise = [a'] where prunedsubs = concatMap prune $ asubs a a' = a{asubs=prunedsubs} -- | Flatten an account tree into a list, which is sometimes -- convenient. Note since accounts link to their parents/subs, the -- tree's structure remains intact and can still be used. It's a tree/list! flattenAccounts :: Account -> [Account] flattenAccounts a = squish a [] where squish a' as = a' : Prelude.foldr squish as (asubs a') -- | Filter an account tree (to a list). filterAccounts :: (Account -> Bool) -> Account -> [Account] filterAccounts p a | p a = a : concatMap (filterAccounts p) (asubs a) | otherwise = concatMap (filterAccounts p) (asubs a) -- | Sort each group of siblings in an account tree by inclusive amount, -- so that the accounts with largest normal balances are listed first. -- The provided normal balance sign determines whether normal balances -- are negative or positive, affecting the sort order. Ie, -- if balances are normally negative, then the most negative balances -- sort first, and vice versa. sortAccountTreeByAmount :: NormalSign -> Account -> Account sortAccountTreeByAmount normalsign = mapAccounts $ \a -> a{asubs=sortSubs $ asubs a} where sortSubs = case normalsign of NormallyPositive -> sortOn (\a -> (Down $ amt a, aname a)) NormallyNegative -> sortOn (\a -> (amt a, aname a)) amt = mixedAmountStripPrices . aibalance -- | Add extra info for this account derived from the Journal's -- account directives, if any (comment, tags, declaration order..). accountSetDeclarationInfo :: Journal -> Account -> Account accountSetDeclarationInfo j a@Account{..} = a{ adeclarationinfo=lookup aname $ jdeclaredaccounts j } -- | Sort account names by the order in which they were declared in -- the journal, at each level of the account tree (ie within each -- group of siblings). Undeclared accounts are sorted last and -- alphabetically. -- This is hledger's default sort for reports organised by account. 
-- The account list is converted to a tree temporarily, adding any -- missing parents; these can be kept (suitable for a tree-mode report) -- or removed (suitable for a flat-mode report). -- sortAccountNamesByDeclaration :: Journal -> Bool -> [AccountName] -> [AccountName] sortAccountNamesByDeclaration j keepparents as = (if keepparents then id else filter (`HS.member` HS.fromList as)) $ -- maybe discard missing parents that were added map aname $ -- keep just the names drop 1 $ -- drop the root node that was added flattenAccounts $ -- convert to an account list sortAccountTreeByDeclaration $ -- sort by declaration order (and name) mapAccounts (accountSetDeclarationInfo j) $ -- add declaration order info accountTree "root" -- convert to an account tree as -- | Sort each group of siblings in an account tree by declaration order, then account name. -- So each group will contain first the declared accounts, -- in the same order as their account directives were parsed, -- and then the undeclared accounts, sorted by account name. sortAccountTreeByDeclaration :: Account -> Account sortAccountTreeByDeclaration a | null $ asubs a = a | otherwise = a{asubs= sortOn accountDeclarationOrderAndName $ map sortAccountTreeByDeclaration $ asubs a } accountDeclarationOrderAndName :: Account -> (Int, AccountName) accountDeclarationOrderAndName a = (adeclarationorder', aname a) where adeclarationorder' = maybe maxBound adideclarationorder $ adeclarationinfo a -- | Search an account list by name. lookupAccount :: AccountName -> [Account] -> Maybe Account lookupAccount a = find ((==a).aname) -- debug helpers printAccounts :: Account -> IO () printAccounts = putStrLn . showAccounts showAccounts = unlines . map showAccountDebug . flattenAccounts showAccountsBoringFlag = unlines . map (show . aboring) . flattenAccounts showAccountDebug a = printf "%-25s %4s %4s %s" (aname a) (wbUnpack . showMixedAmountB noColour $ aebalance a) (wbUnpack . showMixedAmountB noColour $ aibalance a) (if aboring a then "b" else " " :: String) hledger-lib-1.30/Hledger/Data/AccountName.hs0000644000000000000000000004174114434445206016772 0ustar0000000000000000{-# LANGUAGE NoMonomorphismRestriction #-} {-# LANGUAGE OverloadedStrings #-} {-| 'AccountName's are strings like @assets:cash:petty@, with multiple components separated by ':'. From a set of these we derive the account hierarchy. 
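For example (an illustrative sketch using functions defined below):

@
accountNameComponents "assets:cash:petty"  -- ["assets","cash","petty"]
expandAccountName "assets:cash:petty"      -- ["assets","assets:cash","assets:cash:petty"]
parentAccountName "assets:cash:petty"      -- "assets:cash"
@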
-} module Hledger.Data.AccountName ( accountLeafName ,accountNameComponents ,accountNameDrop ,accountNameFromComponents ,accountNameLevel ,accountNameToAccountOnlyRegex ,accountNameToAccountOnlyRegexCI ,accountNameToAccountRegex ,accountNameToAccountRegexCI ,accountNameTreeFrom ,accountSummarisedName ,accountNameInferType ,accountNameType ,assetAccountRegex ,cashAccountRegex ,liabilityAccountRegex ,equityAccountRegex ,conversionAccountRegex ,revenueAccountRegex ,expenseAccountRegex ,acctsep ,acctsepchar ,clipAccountName ,clipOrEllipsifyAccountName ,elideAccountName ,escapeName ,expandAccountName ,expandAccountNames ,isAccountNamePrefixOf -- ,isAccountRegex ,isSubAccountNameOf ,parentAccountName ,parentAccountNames ,subAccountNamesFrom ,topAccountNames ,unbudgetedAccountName ,accountNamePostingType ,accountNameWithoutPostingType ,accountNameWithPostingType ,joinAccountNames ,concatAccountNames ,accountNameApplyAliases ,accountNameApplyAliasesMemo ,tests_AccountName ) where import Control.Applicative ((<|>)) import Control.Monad (foldM) import Data.Foldable (asum, toList) import qualified Data.List.NonEmpty as NE import qualified Data.Map as M import Data.MemoUgly (memo) import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import Data.Tree (Tree(..)) import Safe import Text.DocLayout (realLength) import Hledger.Data.Types import Hledger.Utils -- $setup -- >>> :set -XOverloadedStrings acctsepchar :: Char acctsepchar = ':' acctsep :: Text acctsep = T.pack [acctsepchar] -- accountNameComponents :: AccountName -> [String] -- accountNameComponents = splitAtElement acctsepchar accountNameComponents :: AccountName -> [Text] accountNameComponents = T.splitOn acctsep accountNameFromComponents :: [Text] -> AccountName accountNameFromComponents = T.intercalate acctsep accountLeafName :: AccountName -> Text accountLeafName = last . accountNameComponents -- | Truncate all account name components but the last to two characters. accountSummarisedName :: AccountName -> Text accountSummarisedName a -- length cs > 1 = take 2 (head cs) ++ ":" ++ a' | length cs > 1 = T.intercalate ":" (map (T.take 2) $ init cs) <> ":" <> a' | otherwise = a' where cs = accountNameComponents a a' = accountLeafName a -- | Regular expressions matching common English top-level account names, -- used as a fallback when account types are not declared. assetAccountRegex = toRegexCI' "^assets?(:|$)" cashAccountRegex = toRegexCI' "^assets?(:.+)?:(cash|bank|che(ck|que?)(ing)?|savings?|current)(:|$)" liabilityAccountRegex = toRegexCI' "^(debts?|liabilit(y|ies))(:|$)" equityAccountRegex = toRegexCI' "^equity(:|$)" conversionAccountRegex = toRegexCI' "^equity:(trad(e|ing)|conversion)s?(:|$)" revenueAccountRegex = toRegexCI' "^(income|revenue)s?(:|$)" expenseAccountRegex = toRegexCI' "^expenses?(:|$)" -- | Try to guess an account's type from its name, -- matching common English top-level account names. accountNameInferType :: AccountName -> Maybe AccountType accountNameInferType a | regexMatchText cashAccountRegex a = Just Cash | regexMatchText assetAccountRegex a = Just Asset | regexMatchText liabilityAccountRegex a = Just Liability | regexMatchText conversionAccountRegex a = Just Conversion | regexMatchText equityAccountRegex a = Just Equity | regexMatchText revenueAccountRegex a = Just Revenue | regexMatchText expenseAccountRegex a = Just Expense | otherwise = Nothing -- Extract the 'AccountType' of an 'AccountName' by looking it up in the -- provided Map, traversing the parent accounts if necessary. 
If none of those -- work, try 'accountNameInferType'. accountNameType :: M.Map AccountName AccountType -> AccountName -> Maybe AccountType accountNameType atypes a = asum (map (`M.lookup` atypes) $ a : parentAccountNames a) <|> accountNameInferType a accountNameLevel :: AccountName -> Int accountNameLevel "" = 0 accountNameLevel a = T.length (T.filter (==acctsepchar) a) + 1 -- | A top-level account prefixed to some accounts in budget reports. -- Defined here so it can be ignored by accountNameDrop. unbudgetedAccountName :: T.Text unbudgetedAccountName = "" accountNamePostingType :: AccountName -> PostingType accountNamePostingType a | T.null a = RegularPosting | T.head a == '[' && T.last a == ']' = BalancedVirtualPosting | T.head a == '(' && T.last a == ')' = VirtualPosting | otherwise = RegularPosting accountNameWithoutPostingType :: AccountName -> AccountName accountNameWithoutPostingType a = case accountNamePostingType a of BalancedVirtualPosting -> textUnbracket a VirtualPosting -> textUnbracket a RegularPosting -> a accountNameWithPostingType :: PostingType -> AccountName -> AccountName accountNameWithPostingType BalancedVirtualPosting = wrap "[" "]" . accountNameWithoutPostingType accountNameWithPostingType VirtualPosting = wrap "(" ")" . accountNameWithoutPostingType accountNameWithPostingType RegularPosting = accountNameWithoutPostingType -- | Prefix one account name to another, preserving posting type -- indicators like concatAccountNames. joinAccountNames :: AccountName -> AccountName -> AccountName joinAccountNames a b = concatAccountNames $ filter (not . T.null) [a,b] -- | Join account names into one. If any of them has () or [] posting type -- indicators, these (the first type encountered) will also be applied to -- the resulting account name. concatAccountNames :: [AccountName] -> AccountName concatAccountNames as = accountNameWithPostingType t $ T.intercalate ":" $ map accountNameWithoutPostingType as where t = headDef RegularPosting $ filter (/= RegularPosting) $ map accountNamePostingType as -- | Rewrite an account name using all matching aliases from the given list, in sequence. -- Each alias sees the result of applying the previous aliases. -- Or, return any error arising from a bad regular expression in the aliases. accountNameApplyAliases :: [AccountAlias] -> AccountName -> Either RegexError AccountName accountNameApplyAliases aliases a = let (name,typ) = (accountNameWithoutPostingType a, accountNamePostingType a) in foldM (\acct alias -> dbg6 "result" $ aliasReplace (dbg6 "alias" alias) (dbg6 "account" acct)) name aliases >>= Right . accountNameWithPostingType typ -- | Memoising version of accountNameApplyAliases, maybe overkill. accountNameApplyAliasesMemo :: [AccountAlias] -> AccountName -> Either RegexError AccountName accountNameApplyAliasesMemo aliases = memo (accountNameApplyAliases aliases) -- XXX re-test this memoisation -- aliasMatches :: AccountAlias -> AccountName -> Bool -- aliasMatches (BasicAlias old _) a = old `isAccountNamePrefixOf` a -- aliasMatches (RegexAlias re _) a = regexMatchesCI re a aliasReplace :: AccountAlias -> AccountName -> Either RegexError AccountName aliasReplace (BasicAlias old new) a | old `isAccountNamePrefixOf` a || old == a = Right $ new <> T.drop (T.length old) a | otherwise = Right a aliasReplace (RegexAlias re repl) a = fmap T.pack . regexReplace re repl $ T.unpack a -- XXX -- | Remove some number of account name components from the front of the account name. 
-- If the special "" top-level account is present, it is preserved and -- dropping affects the rest of the account name. accountNameDrop :: Int -> AccountName -> AccountName accountNameDrop n a | a == unbudgetedAccountName = a | unbudgetedAccountAndSep `T.isPrefixOf` a = case accountNameDrop n $ T.drop (T.length unbudgetedAccountAndSep) a of "" -> unbudgetedAccountName a' -> unbudgetedAccountAndSep <> a' | otherwise = accountNameFromComponentsOrElide . drop n $ accountNameComponents a where unbudgetedAccountAndSep = unbudgetedAccountName <> acctsep accountNameFromComponentsOrElide [] = "..." accountNameFromComponentsOrElide xs = accountNameFromComponents xs -- | Sorted unique account names implied by these account names, -- ie these plus all their parent accounts up to the root. -- Eg: ["a:b:c","d:e"] -> ["a","a:b","a:b:c","d","d:e"] expandAccountNames :: [AccountName] -> [AccountName] expandAccountNames = toList . foldMap (S.fromList . expandAccountName) -- | "a:b:c" -> ["a","a:b","a:b:c"] expandAccountName :: AccountName -> [AccountName] expandAccountName = map accountNameFromComponents . NE.tail . NE.inits . accountNameComponents -- | ["a:b:c","d:e"] -> ["a","d"] topAccountNames :: [AccountName] -> [AccountName] topAccountNames = filter ((1==) . accountNameLevel) . expandAccountNames parentAccountName :: AccountName -> AccountName parentAccountName = accountNameFromComponents . init . accountNameComponents parentAccountNames :: AccountName -> [AccountName] parentAccountNames a = parentAccountNames' $ parentAccountName a where parentAccountNames' "" = [] parentAccountNames' a2 = a2 : parentAccountNames' (parentAccountName a2) -- | Is the first account a parent or other ancestor of (and not the same as) the second ? isAccountNamePrefixOf :: AccountName -> AccountName -> Bool isAccountNamePrefixOf = T.isPrefixOf . (<> acctsep) isSubAccountNameOf :: AccountName -> AccountName -> Bool s `isSubAccountNameOf` p = (p `isAccountNamePrefixOf` s) && (accountNameLevel s == (accountNameLevel p + 1)) -- | From a list of account names, select those which are direct -- subaccounts of the given account name. subAccountNamesFrom :: [AccountName] -> AccountName -> [AccountName] subAccountNamesFrom accts a = filter (`isSubAccountNameOf` a) accts -- | Convert a list of account names to a tree. accountNameTreeFrom :: [AccountName] -> Tree AccountName accountNameTreeFrom accts = Node "root" (accounttreesfrom (topAccountNames accts)) where accounttreesfrom :: [AccountName] -> [Tree AccountName] accounttreesfrom [] = [] accounttreesfrom as = [Node a (accounttreesfrom $ subs a) | a <- as] subs = subAccountNamesFrom (expandAccountNames accts) --nullaccountnametree = Node "root" [] -- | Elide an account name to fit in the specified width. -- From the ledger 2.6 news: -- -- @ -- What Ledger now does is that if an account name is too long, it will -- start abbreviating the first parts of the account name down to two -- letters in length. If this results in a string that is still too -- long, the front will be elided -- not the end. For example: -- -- Expenses:Cash ; OK, not too long -- Ex:Wednesday:Cash ; "Expenses" was abbreviated to fit -- Ex:We:Afternoon:Cash ; "Expenses" and "Wednesday" abbreviated -- ; Expenses:Wednesday:Afternoon:Lunch:Snack:Candy:Chocolate:Cash -- ..:Af:Lu:Sn:Ca:Ch:Cash ; Abbreviated and elided! 
-- @ elideAccountName :: Int -> AccountName -> AccountName elideAccountName width s -- XXX special case for transactions register's multi-account pseudo-names | " (split)" `T.isSuffixOf` s = let names = T.splitOn ", " $ T.take (T.length s - 8) s widthpername = max 0 (width - 8 - 2 * (max 1 (length names) - 1)) `div` length names in fitText Nothing (Just width) True False $ (<>" (split)") $ T.intercalate ", " [accountNameFromComponents $ elideparts widthpername [] $ accountNameComponents s' | s' <- names] | otherwise = fitText Nothing (Just width) True False $ accountNameFromComponents $ elideparts width [] $ accountNameComponents s where elideparts :: Int -> [Text] -> [Text] -> [Text] elideparts w done ss | realLength (accountNameFromComponents $ done++ss) <= w = done++ss | length ss > 1 = elideparts w (done++[textTakeWidth 2 $ head ss]) (tail ss) | otherwise = done++ss -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is Just 0, returns the empty string, if n is -- Nothing, return the full name. clipAccountName :: Maybe Int -> AccountName -> AccountName clipAccountName Nothing = id clipAccountName (Just n) = accountNameFromComponents . take n . accountNameComponents -- | Keep only the first n components of an account name, where n -- is a positive integer. If n is Just 0, returns "...", if n is Nothing, return -- the full name. clipOrEllipsifyAccountName :: Maybe Int -> AccountName -> AccountName clipOrEllipsifyAccountName (Just 0) = const "..." clipOrEllipsifyAccountName n = clipAccountName n -- | Escape an AccountName for use within a regular expression. -- >>> putStr . T.unpack $ escapeName "First?!#$*?$(*) !@^#*? %)*!@#" -- First\?!#\$\*\?\$\(\*\) !@\^#\*\? %\)\*!@# escapeName :: AccountName -> Text escapeName = T.concatMap escapeChar where escapeChar c = if c `elem` escapedChars then T.snoc "\\" c else T.singleton c escapedChars = ['[', '?', '+', '|', '(', ')', '*', '$', '^', '\\'] -- | Convert an account name to a regular expression matching it and its subaccounts. accountNameToAccountRegex :: AccountName -> Regexp accountNameToAccountRegex a = toRegex' $ "^" <> escapeName a <> "(:|$)" -- PARTIAL: Is this safe after escapeName? -- | Convert an account name to a regular expression matching it and its subaccounts, -- case insensitively. accountNameToAccountRegexCI :: AccountName -> Regexp accountNameToAccountRegexCI a = toRegexCI' $ "^" <> escapeName a <> "(:|$)" -- PARTIAL: Is this safe after escapeName? -- | Convert an account name to a regular expression matching it but not its subaccounts. accountNameToAccountOnlyRegex :: AccountName -> Regexp accountNameToAccountOnlyRegex a = toRegex' $ "^" <> escapeName a <> "$" -- PARTIAL: Is this safe after escapeName? -- | Convert an account name to a regular expression matching it but not its subaccounts, -- case insensitively. accountNameToAccountOnlyRegexCI :: AccountName -> Regexp accountNameToAccountOnlyRegexCI a = toRegexCI' $ "^" <> escapeName a <> "$" -- PARTIAL: Is this safe after escapeName? -- -- | Does this string look like an exact account-matching regular expression ? 
--isAccountRegex :: String -> Bool --isAccountRegex s = take 1 s == "^" && take 5 (reverse s) == ")$|:(" tests_AccountName = testGroup "AccountName" [ testCase "accountNameTreeFrom" $ do accountNameTreeFrom ["a"] @?= Node "root" [Node "a" []] accountNameTreeFrom ["a","b"] @?= Node "root" [Node "a" [], Node "b" []] accountNameTreeFrom ["a","a:b"] @?= Node "root" [Node "a" [Node "a:b" []]] accountNameTreeFrom ["a:b:c"] @?= Node "root" [Node "a" [Node "a:b" [Node "a:b:c" []]]] ,testCase "expandAccountNames" $ do expandAccountNames ["assets:cash","assets:checking","expenses:vacation"] @?= ["assets","assets:cash","assets:checking","expenses","expenses:vacation"] ,testCase "isAccountNamePrefixOf" $ do "assets" `isAccountNamePrefixOf` "assets" @?= False "assets" `isAccountNamePrefixOf` "assets:bank" @?= True "assets" `isAccountNamePrefixOf` "assets:bank:checking" @?= True "my assets" `isAccountNamePrefixOf` "assets:bank" @?= False ,testCase "isSubAccountNameOf" $ do "assets" `isSubAccountNameOf` "assets" @?= False "assets:bank" `isSubAccountNameOf` "assets" @?= True "assets:bank:checking" `isSubAccountNameOf` "assets" @?= False "assets:bank" `isSubAccountNameOf` "my assets" @?= False ,testCase "accountNameInferType" $ do accountNameInferType "assets" @?= Just Asset accountNameInferType "assets:cash" @?= Just Cash accountNameInferType "assets:A/R" @?= Just Asset accountNameInferType "liabilities" @?= Just Liability accountNameInferType "equity" @?= Just Equity accountNameInferType "equity:conversion" @?= Just Conversion accountNameInferType "expenses" @?= Just Expense accountNameInferType "revenues" @?= Just Revenue accountNameInferType "revenue" @?= Just Revenue accountNameInferType "income" @?= Just Revenue ,testCase "joinAccountNames" $ do joinAccountNames "assets" "cash" @?= "assets:cash" joinAccountNames "assets:cash" "a" @?= "assets:cash:a" joinAccountNames "assets" "(cash)" @?= "(assets:cash)" joinAccountNames "assets" "[cash]" @?= "[assets:cash]" joinAccountNames "(assets)" "cash" @?= "(assets:cash)" joinAccountNames "" "assets" @?= "assets" joinAccountNames "assets" "" @?= "assets" ,testCase "concatAccountNames" $ do concatAccountNames ["assets", "cash"] @?= "assets:cash" concatAccountNames ["assets:cash", "a"] @?= "assets:cash:a" concatAccountNames ["assets", "(cash)"] @?= "(assets:cash)" concatAccountNames ["assets", "[cash]"] @?= "[assets:cash]" concatAccountNames ["(assets)", "cash"] @?= "(assets:cash)" concatAccountNames ["", "assets"] @?= ":assets" concatAccountNames ["assets", ""] @?= "assets:" ] hledger-lib-1.30/Hledger/Data/Amount.hs0000644000000000000000000013131314434445206016033 0ustar0000000000000000{-| A simple 'Amount' is some quantity of money, shares, or anything else. It has a (possibly null) 'CommoditySymbol' and a numeric quantity: @ $1 £-50 EUR 3.44 GOOG 500 1.5h 90 apples 0 @ It may also have an assigned 'Price', representing this amount's per-unit or total cost in a different commodity. If present, this is rendered like so: @ EUR 2 \@ $1.50 (unit price) EUR 2 \@\@ $3 (total price) @ A 'MixedAmount' is zero or more simple amounts, so can represent multiple commodities; this is the type most often used: @ 0 $50 + EUR 3 16h + $13.55 + AAPL 500 + 6 oranges @ A mixed amount is always \"normalised\", it has no more than one amount in each commodity and price. When calling 'amounts' it will have no zero amounts, or just a single zero amount and no other amounts. 
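For example (an illustrative sketch, using the test constructors defined
in this module):

@
amounts $ mixed [usd 1, usd 2, eur 3]   -- two amounts: $3.00 and €3.00
amounts $ mixed [usd 1, usd (-1)]       -- a single zero amount
@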
Limited arithmetic with simple and mixed amounts is supported, best used with similar amounts since it mostly ignores assigned prices and commodity exchange rates. -} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} module Hledger.Data.Amount ( -- * Commodity showCommoditySymbol, isNonsimpleCommodityChar, quoteCommoditySymbolIfNeeded, -- * Amount nullamt, missingamt, num, usd, eur, gbp, per, hrs, at, (@@), amountWithCommodity, -- ** arithmetic amountCost, amountIsZero, amountLooksZero, divideAmount, multiplyAmount, -- ** rendering AmountDisplayOpts(..), noColour, noPrice, oneLine, csvDisplay, amountstyle, styleAmount, styleAmountExceptPrecision, amountUnstyled, showAmountB, showAmount, showAmountPrice, cshowAmount, showAmountWithZeroCommodity, showAmountDebug, showAmountWithoutPrice, amountSetPrecision, withPrecision, amountSetFullPrecision, setAmountInternalPrecision, withInternalPrecision, setAmountDecimalPoint, withDecimalPoint, amountStripPrices, canonicaliseAmount, -- * MixedAmount nullmixedamt, missingmixedamt, isMissingMixedAmount, mixed, mixedAmount, maAddAmount, maAddAmounts, amounts, amountsRaw, maCommodities, filterMixedAmount, filterMixedAmountByCommodity, mapMixedAmount, unifyMixedAmount, mixedAmountStripPrices, -- ** arithmetic mixedAmountCost, maNegate, maPlus, maMinus, maSum, divideMixedAmount, multiplyMixedAmount, averageMixedAmounts, isNegativeAmount, isNegativeMixedAmount, mixedAmountIsZero, maIsZero, maIsNonZero, mixedAmountLooksZero, -- ** rendering styleMixedAmount, mixedAmountUnstyled, showMixedAmount, showMixedAmountOneLine, showMixedAmountDebug, showMixedAmountWithoutPrice, showMixedAmountOneLineWithoutPrice, showMixedAmountElided, showMixedAmountWithZeroCommodity, showMixedAmountB, showMixedAmountLinesB, wbToText, wbUnpack, mixedAmountSetPrecision, mixedAmountSetFullPrecision, canonicaliseMixedAmount, -- * misc. tests_Amount ) where import Prelude hiding (Applicative(..)) import Control.Applicative (Applicative(..)) import Control.Monad (foldM) import Data.Char (isDigit) import Data.Decimal (DecimalRaw(..), decimalPlaces, normalizeDecimal, roundTo) import Data.Default (Default(..)) import Data.Foldable (toList) import Data.List (find, foldl', intercalate, intersperse, mapAccumL, partition) import Data.List.NonEmpty (NonEmpty(..), nonEmpty) import qualified Data.Map.Strict as M import qualified Data.Set as S import Data.Maybe (fromMaybe, isNothing, isJust) import Data.Semigroup (Semigroup(..)) import qualified Data.Text as T import qualified Data.Text.Lazy.Builder as TB import Data.Word (Word8) import Safe (headDef, lastDef, lastMay) import System.Console.ANSI (Color(..),ColorIntensity(..)) import Test.Tasty (testGroup) import Test.Tasty.HUnit ((@?=), assertBool, testCase) import Hledger.Data.Types import Hledger.Utils (colorB, numDigitsInt) import Hledger.Utils.Text (textQuoteIfNeeded) import Text.WideString (WideBuilder(..), wbFromText, wbToText, wbUnpack) -- A 'Commodity' is a symbol representing a currency or some other kind of -- thing we are tracking, and some display preferences that tell how to -- display 'Amount's of the commodity - is the symbol on the left or right, -- are thousands separated by comma, significant decimal places and so on. -- | Show space-containing commodity symbols quoted, as they are in a journal. 
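--
-- An illustrative sketch (results are indicative, not doctests):
--
-- > showCommoditySymbol "USD"          -- "USD"
-- > showCommoditySymbol "Apple stock"  -- "\"Apple stock\"" (contains a space, so quoted)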
showCommoditySymbol :: T.Text -> T.Text showCommoditySymbol = textQuoteIfNeeded -- characters that may not be used in a non-quoted commodity symbol isNonsimpleCommodityChar :: Char -> Bool isNonsimpleCommodityChar = liftA2 (||) isDigit isOther where otherChars = "-+.@*;\t\n \"{}=" :: T.Text isOther c = T.any (==c) otherChars quoteCommoditySymbolIfNeeded :: T.Text -> T.Text quoteCommoditySymbolIfNeeded s | T.any isNonsimpleCommodityChar s = "\"" <> s <> "\"" | otherwise = s -- | Options for the display of Amount and MixedAmount. data AmountDisplayOpts = AmountDisplayOpts { displayPrice :: Bool -- ^ Whether to display the Price of an Amount. , displayZeroCommodity :: Bool -- ^ If the Amount rounds to 0, whether to display its commodity string. , displayThousandsSep :: Bool -- ^ Whether to display thousands separators. , displayColour :: Bool -- ^ Whether to colourise negative Amounts. , displayOneLine :: Bool -- ^ Whether to display on one line. , displayMinWidth :: Maybe Int -- ^ Minimum width to pad to , displayMaxWidth :: Maybe Int -- ^ Maximum width to clip to -- | Display amounts in this order (without the commodity symbol) and display -- a 0 in case a corresponding commodity does not exist , displayOrder :: Maybe [CommoditySymbol] } deriving (Show) -- | Display Amount and MixedAmount with no colour. instance Default AmountDisplayOpts where def = noColour -- | Display Amount and MixedAmount with no colour. noColour :: AmountDisplayOpts noColour = AmountDisplayOpts { displayPrice = True , displayColour = False , displayZeroCommodity = False , displayThousandsSep = True , displayOneLine = False , displayMinWidth = Just 0 , displayMaxWidth = Nothing , displayOrder = Nothing } -- | Display Amount and MixedAmount with no prices. noPrice :: AmountDisplayOpts noPrice = def{displayPrice=False} -- | Display Amount and MixedAmount on one line with no prices. oneLine :: AmountDisplayOpts oneLine = def{displayOneLine=True, displayPrice=False} -- | Display Amount and MixedAmount in a form suitable for CSV output. csvDisplay :: AmountDisplayOpts csvDisplay = oneLine{displayThousandsSep=False} ------------------------------------------------------------------------------- -- Amount styles -- | Default amount style amountstyle = AmountStyle L False (Precision 0) (Just '.') Nothing ------------------------------------------------------------------------------- -- Amount instance Num Amount where abs a@Amount{aquantity=q} = a{aquantity=abs q} signum a@Amount{aquantity=q} = a{aquantity=signum q} fromInteger i = nullamt{aquantity=fromInteger i} negate = transformAmount negate (+) = similarAmountsOp (+) (-) = similarAmountsOp (-) (*) = similarAmountsOp (*) -- | The empty simple amount. nullamt :: Amount nullamt = Amount{acommodity="", aquantity=0, aprice=Nothing, astyle=amountstyle} -- | A temporary value for parsed transactions which had no amount specified. missingamt :: Amount missingamt = nullamt{acommodity="AUTO"} -- Handy amount constructors for tests. -- usd/eur/gbp round their argument to a whole number of pennies/cents. 
-- XXX these are a bit clashy num n = nullamt{acommodity="", aquantity=n} hrs n = nullamt{acommodity="h", aquantity=n, astyle=amountstyle{asprecision=Precision 2, ascommodityside=R}} usd n = nullamt{acommodity="$", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=Precision 2}} eur n = nullamt{acommodity="€", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=Precision 2}} gbp n = nullamt{acommodity="£", aquantity=roundTo 2 n, astyle=amountstyle{asprecision=Precision 2}} per n = nullamt{acommodity="%", aquantity=n, astyle=amountstyle{asprecision=Precision 1, ascommodityside=R, ascommodityspaced=True}} amt `at` priceamt = amt{aprice=Just $ UnitPrice priceamt} amt @@ priceamt = amt{aprice=Just $ TotalPrice priceamt} -- | Apply a binary arithmetic operator to two amounts, which should -- be in the same commodity if non-zero (warning, this is not checked). -- A zero result keeps the commodity of the second amount. -- The result's display style is that of the second amount, with -- precision set to the highest of either amount. -- Prices are ignored and discarded. -- Remember: the caller is responsible for ensuring both amounts have the same commodity. similarAmountsOp :: (Quantity -> Quantity -> Quantity) -> Amount -> Amount -> Amount similarAmountsOp op Amount{acommodity=_, aquantity=q1, astyle=AmountStyle{asprecision=p1}} Amount{acommodity=c2, aquantity=q2, astyle=s2@AmountStyle{asprecision=p2}} = -- trace ("a1:"++showAmountDebug a1) $ trace ("a2:"++showAmountDebug a2) $ traceWith (("= :"++).showAmountDebug) nullamt{acommodity=c2, aquantity=q1 `op` q2, astyle=s2{asprecision=max p1 p2}} -- c1==c2 || q1==0 || q2==0 = -- otherwise = error "tried to do simple arithmetic with amounts in different commodities" -- | Convert an amount to the specified commodity, ignoring and discarding -- any assigned prices and assuming an exchange rate of 1. amountWithCommodity :: CommoditySymbol -> Amount -> Amount amountWithCommodity c a = a{acommodity=c, aprice=Nothing} -- | Convert a amount to its "cost" or "selling price" in another commodity, -- using its attached transaction price if it has one. Notes: -- -- - price amounts must be MixedAmounts with exactly one component Amount -- (or there will be a runtime error XXX) -- -- - price amounts should be positive in the Journal -- (though this is currently not enforced) amountCost :: Amount -> Amount amountCost a@Amount{aquantity=q, aprice=mp} = case mp of Nothing -> a Just (UnitPrice p@Amount{aquantity=pq}) -> p{aquantity=pq * q} Just (TotalPrice p@Amount{aquantity=pq}) -> p{aquantity=pq} -- | Apply a function to an amount's quantity (and its total price, if it has one). transformAmount :: (Quantity -> Quantity) -> Amount -> Amount transformAmount f a@Amount{aquantity=q,aprice=p} = a{aquantity=f q, aprice=f' <$> p} where f' (TotalPrice a1@Amount{aquantity=pq}) = TotalPrice a1{aquantity = f pq} f' p' = p' -- | Divide an amount's quantity (and its total price, if it has one) by a constant. divideAmount :: Quantity -> Amount -> Amount divideAmount n = transformAmount (/n) -- | Multiply an amount's quantity (and its total price, if it has one) by a constant. multiplyAmount :: Quantity -> Amount -> Amount multiplyAmount n = transformAmount (*n) -- | Is this amount negative ? The price is ignored. isNegativeAmount :: Amount -> Bool isNegativeAmount Amount{aquantity=q} = q < 0 -- | Round an Amount's Quantity to its specified display precision. If that is -- NaturalPrecision, this does nothing. 
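--
-- An illustrative sketch ('usd' amounts display with precision 2, and
-- roundTo uses banker's rounding):
--
-- > amountRoundedQuantity ((usd 0){aquantity=2.345})  -- 2.34
-- > amountRoundedQuantity ((usd 0){aquantity=2.675})  -- 2.68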
amountRoundedQuantity :: Amount -> Quantity amountRoundedQuantity Amount{aquantity=q, astyle=AmountStyle{asprecision=p}} = case p of NaturalPrecision -> q Precision p' -> roundTo p' q -- | Apply a test to both an Amount and its total price, if it has one. testAmountAndTotalPrice :: (Amount -> Bool) -> Amount -> Bool testAmountAndTotalPrice f amt = case aprice amt of Just (TotalPrice price) -> f amt && f price _ -> f amt -- | Do this Amount and (and its total price, if it has one) appear to be zero when rendered with its -- display precision ? amountLooksZero :: Amount -> Bool amountLooksZero = testAmountAndTotalPrice looksZero where looksZero Amount{aquantity=Decimal e q, astyle=AmountStyle{asprecision=p}} = case p of Precision d -> if e > d then abs q <= 5*10^(e-d-1) else q == 0 NaturalPrecision -> q == 0 -- | Is this Amount (and its total price, if it has one) exactly zero, ignoring its display precision ? amountIsZero :: Amount -> Bool amountIsZero = testAmountAndTotalPrice (\Amount{aquantity=Decimal _ q} -> q == 0) -- | Set an amount's display precision, flipped. withPrecision :: Amount -> AmountPrecision -> Amount withPrecision = flip amountSetPrecision -- | Set an amount's display precision. amountSetPrecision :: AmountPrecision -> Amount -> Amount amountSetPrecision p a@Amount{astyle=s} = a{astyle=s{asprecision=p}} -- | Increase an amount's display precision, if needed, to enough decimal places -- to show it exactly (showing all significant decimal digits, excluding trailing -- zeros). amountSetFullPrecision :: Amount -> Amount amountSetFullPrecision a = amountSetPrecision p a where p = max displayprecision naturalprecision displayprecision = asprecision $ astyle a naturalprecision = Precision . decimalPlaces . normalizeDecimal $ aquantity a -- | Set an amount's internal precision, ie rounds the Decimal representing -- the amount's quantity to some number of decimal places. -- Rounding is done with Data.Decimal's default roundTo function: -- "If the value ends in 5 then it is rounded to the nearest even value (Banker's Rounding)". -- Does not change the amount's display precision. -- Intended mainly for internal use, eg when comparing amounts in tests. setAmountInternalPrecision :: Word8 -> Amount -> Amount setAmountInternalPrecision p a@Amount{ aquantity=q, astyle=s } = a{ astyle=s{asprecision=Precision p} ,aquantity=roundTo p q } -- | Set an amount's internal precision, flipped. -- Intended mainly for internal use, eg when comparing amounts in tests. withInternalPrecision :: Amount -> Word8 -> Amount withInternalPrecision = flip setAmountInternalPrecision -- | Set (or clear) an amount's display decimal point. setAmountDecimalPoint :: Maybe Char -> Amount -> Amount setAmountDecimalPoint mc a@Amount{ astyle=s } = a{ astyle=s{asdecimalpoint=mc} } -- | Set (or clear) an amount's display decimal point, flipped. 
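--
-- An illustrative sketch:
--
-- > showAmount (usd 9.99 `withDecimalPoint` Just ',')  -- "$9,99"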
withDecimalPoint :: Amount -> Maybe Char -> Amount withDecimalPoint = flip setAmountDecimalPoint -- | Strip all prices from an Amount amountStripPrices :: Amount -> Amount amountStripPrices a = a{aprice=Nothing} showAmountPrice :: Amount -> WideBuilder showAmountPrice amt = case aprice amt of Nothing -> mempty Just (UnitPrice pa) -> WideBuilder (TB.fromString " @ ") 3 <> showAmountB noColour pa Just (TotalPrice pa) -> WideBuilder (TB.fromString " @@ ") 4 <> showAmountB noColour (sign pa) where sign = if aquantity amt < 0 then negate else id showAmountPriceDebug :: Maybe AmountPrice -> String showAmountPriceDebug Nothing = "" showAmountPriceDebug (Just (UnitPrice pa)) = " @ " ++ showAmountDebug pa showAmountPriceDebug (Just (TotalPrice pa)) = " @@ " ++ showAmountDebug pa -- | Given a map of standard commodity display styles, apply the -- appropriate one to this amount. If there's no standard style for -- this amount's commodity, return the amount unchanged. -- Also apply the style to the price (except for precision) styleAmount :: M.Map CommoditySymbol AmountStyle -> Amount -> Amount styleAmount styles a = styledAmount{aprice = stylePrice styles (aprice styledAmount)} where styledAmount = case M.lookup (acommodity a) styles of Just s -> a{astyle=s} Nothing -> a stylePrice :: M.Map CommoditySymbol AmountStyle -> Maybe AmountPrice -> Maybe AmountPrice stylePrice styles (Just (UnitPrice a)) = Just (UnitPrice $ styleAmountExceptPrecision styles a) stylePrice styles (Just (TotalPrice a)) = Just (TotalPrice $ styleAmountExceptPrecision styles a) stylePrice _ _ = Nothing -- | Like styleAmount, but keep the number of decimal places unchanged. styleAmountExceptPrecision :: M.Map CommoditySymbol AmountStyle -> Amount -> Amount styleAmountExceptPrecision styles a@Amount{astyle=AmountStyle{asprecision=origp}} = case M.lookup (acommodity a) styles of Just s -> a{astyle=s{asprecision=origp}} Nothing -> a -- | Reset this amount's display style to the default. amountUnstyled :: Amount -> Amount amountUnstyled a = a{astyle=amountstyle} -- | Get the string representation of an amount, based on its -- commodity's display settings. String representations equivalent to -- zero are converted to just \"0\". The special "missing" amount is -- displayed as the empty string. -- -- > showAmount = wbUnpack . showAmountB noColour showAmount :: Amount -> String showAmount = wbUnpack . showAmountB noColour -- | General function to generate a WideBuilder for an Amount, according the -- supplied AmountDisplayOpts. The special "missing" amount is displayed as -- the empty string. This is the main function to use for showing -- Amounts, constructing a builder; it can then be converted to a Text with -- wbToText, or to a String with wbUnpack. 
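--
-- An illustrative sketch of the option presets defined above:
--
-- > wbUnpack $ showAmountB noColour (usd 2 `at` eur 1.5)  -- "$2.00 @ €1.50"
-- > wbUnpack $ showAmountB oneLine  (usd 2 `at` eur 1.5)  -- "$2.00" (oneLine omits prices)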
showAmountB :: AmountDisplayOpts -> Amount -> WideBuilder showAmountB _ Amount{acommodity="AUTO"} = mempty showAmountB opts a@Amount{astyle=style} = color $ case ascommodityside style of L -> showC (wbFromText c) space <> quantity' <> price R -> quantity' <> showC space (wbFromText c) <> price where quantity = showamountquantity $ if displayThousandsSep opts then a else a{astyle=(astyle a){asdigitgroups=Nothing}} (quantity',c) | amountLooksZero a && not (displayZeroCommodity opts) = (WideBuilder (TB.singleton '0') 1,"") | otherwise = (quantity, quoteCommoditySymbolIfNeeded $ acommodity a) space = if not (T.null c) && ascommodityspaced style then WideBuilder (TB.singleton ' ') 1 else mempty showC l r = if isJust (displayOrder opts) then mempty else l <> r price = if displayPrice opts then showAmountPrice a else mempty color = if displayColour opts && isNegativeAmount a then colorB Dull Red else id -- | Colour version. For a negative amount, adds ANSI codes to change the colour, -- currently to hard-coded red. -- -- > cshowAmount = wbUnpack . showAmountB def{displayColour=True} cshowAmount :: Amount -> String cshowAmount = wbUnpack . showAmountB def{displayColour=True} -- | Get the string representation of an amount, without any \@ price. -- -- > showAmountWithoutPrice = wbUnpack . showAmountB noPrice showAmountWithoutPrice :: Amount -> String showAmountWithoutPrice = wbUnpack . showAmountB noPrice -- | Like showAmount, but show a zero amount's commodity if it has one. -- -- > showAmountWithZeroCommodity = wbUnpack . showAmountB noColour{displayZeryCommodity=True} showAmountWithZeroCommodity :: Amount -> String showAmountWithZeroCommodity = wbUnpack . showAmountB noColour{displayZeroCommodity=True} -- | Get a string representation of an amount for debugging, -- appropriate to the current debug level. 9 shows maximum detail. showAmountDebug :: Amount -> String showAmountDebug Amount{acommodity="AUTO"} = "(missing)" showAmountDebug Amount{..} = "Amount {acommodity=" ++ show acommodity ++ ", aquantity=" ++ show aquantity ++ ", aprice=" ++ showAmountPriceDebug aprice ++ ", astyle=" ++ show astyle ++ "}" -- | Get a Text Builder for the string representation of the number part of of an amount, -- using the display settings from its commodity. Also returns the width of the -- number. showamountquantity :: Amount -> WideBuilder showamountquantity amt@Amount{astyle=AmountStyle{asdecimalpoint=mdec, asdigitgroups=mgrps}} = signB <> intB <> fracB where Decimal e n = amountRoundedQuantity amt strN = T.pack . show $ abs n len = T.length strN intLen = max 1 $ len - fromIntegral e dec = fromMaybe '.' mdec padded = T.replicate (fromIntegral e + 1 - len) "0" <> strN (intPart, fracPart) = T.splitAt intLen padded intB = applyDigitGroupStyle mgrps intLen $ if e == 0 then strN else intPart signB = if n < 0 then WideBuilder (TB.singleton '-') 1 else mempty fracB = if e > 0 then WideBuilder (TB.singleton dec <> TB.fromText fracPart) (fromIntegral e + 1) else mempty -- | Split a string representation into chunks according to DigitGroupStyle, -- returning a Text builder and the number of separators used. 
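--
-- An illustrative sketch (this is an internal helper; the Int argument is
-- the width of the string being grouped):
--
-- > applyDigitGroupStyle (Just (DigitGroups ',' [3])) 7 "1000000"
--
-- builds @1,000,000@, grouping digits in threes from the right.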
applyDigitGroupStyle :: Maybe DigitGroupStyle -> Int -> T.Text -> WideBuilder applyDigitGroupStyle Nothing l s = WideBuilder (TB.fromText s) l applyDigitGroupStyle (Just (DigitGroups _ [])) l s = WideBuilder (TB.fromText s) l applyDigitGroupStyle (Just (DigitGroups c (g0:gs0))) l0 s0 = addseps (g0:|gs0) (toInteger l0) s0 where addseps (g1:|gs1) l1 s1 | l2 > 0 = addseps gs2 l2 rest <> WideBuilder (TB.singleton c <> TB.fromText part) (fromIntegral g1 + 1) | otherwise = WideBuilder (TB.fromText s1) (fromInteger l1) where (rest, part) = T.splitAt (fromInteger l2) s1 gs2 = fromMaybe (g1:|[]) $ nonEmpty gs1 l2 = l1 - toInteger g1 -- like journalCanonicaliseAmounts -- | Canonicalise an amount's display style using the provided commodity style map. canonicaliseAmount :: M.Map CommoditySymbol AmountStyle -> Amount -> Amount canonicaliseAmount styles a@Amount{acommodity=c, astyle=s} = a{astyle=s'} where s' = M.findWithDefault s c styles ------------------------------------------------------------------------------- -- MixedAmount instance Semigroup MixedAmount where (<>) = maPlus sconcat = maSum stimes n = multiplyMixedAmount (fromIntegral n) instance Monoid MixedAmount where mempty = nullmixedamt mconcat = maSum instance Num MixedAmount where fromInteger = mixedAmount . fromInteger negate = maNegate (+) = maPlus (*) = error "error, mixed amounts do not support multiplication" -- PARTIAL: abs = error "error, mixed amounts do not support abs" signum = error "error, mixed amounts do not support signum" -- | Calculate the key used to store an Amount within a MixedAmount. amountKey :: Amount -> MixedAmountKey amountKey amt@Amount{acommodity=c} = case aprice amt of Nothing -> MixedAmountKeyNoPrice c Just (TotalPrice p) -> MixedAmountKeyTotalPrice c (acommodity p) Just (UnitPrice p) -> MixedAmountKeyUnitPrice c (acommodity p) (aquantity p) -- | The empty mixed amount. nullmixedamt :: MixedAmount nullmixedamt = Mixed mempty -- | A temporary value for parsed transactions which had no amount specified. missingmixedamt :: MixedAmount missingmixedamt = mixedAmount missingamt -- | Whether a MixedAmount has a missing amount isMissingMixedAmount :: MixedAmount -> Bool isMissingMixedAmount (Mixed ma) = amountKey missingamt `M.member` ma -- | Convert amounts in various commodities into a mixed amount. mixed :: Foldable t => t Amount -> MixedAmount mixed = maAddAmounts nullmixedamt -- | Create a MixedAmount from a single Amount. mixedAmount :: Amount -> MixedAmount mixedAmount a = Mixed $ M.singleton (amountKey a) a -- | Add an Amount to a MixedAmount, normalising the result. -- Amounts with different costs are kept separate. maAddAmount :: MixedAmount -> Amount -> MixedAmount maAddAmount (Mixed ma) a = Mixed $ M.insertWith sumSimilarAmountsUsingFirstPrice (amountKey a) a ma -- | Add a collection of Amounts to a MixedAmount, normalising the result. -- Amounts with different costs are kept separate. maAddAmounts :: Foldable t => MixedAmount -> t Amount -> MixedAmount maAddAmounts = foldl' maAddAmount -- | Negate mixed amount's quantities (and total prices, if any). maNegate :: MixedAmount -> MixedAmount maNegate = transformMixedAmount negate -- | Sum two MixedAmount, keeping the cost of the first if any. -- Amounts with different costs are kept separate (since 2021). maPlus :: MixedAmount -> MixedAmount -> MixedAmount maPlus (Mixed as) (Mixed bs) = Mixed $ M.unionWith sumSimilarAmountsUsingFirstPrice as bs -- | Subtract a MixedAmount from another. -- Amounts with different costs are kept separate. 
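--
-- An illustrative sketch:
--
-- > amounts $ mixed [usd 3] `maMinus` mixed [usd 1, eur 2]
--
-- yields the amounts $2.00 and €-2.00.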
maMinus :: MixedAmount -> MixedAmount -> MixedAmount maMinus a = maPlus a . maNegate -- | Sum a collection of MixedAmounts. -- Amounts with different costs are kept separate. maSum :: Foldable t => t MixedAmount -> MixedAmount maSum = foldl' maPlus nullmixedamt -- | Divide a mixed amount's quantities (and total prices, if any) by a constant. divideMixedAmount :: Quantity -> MixedAmount -> MixedAmount divideMixedAmount n = transformMixedAmount (/n) -- | Multiply a mixed amount's quantities (and total prices, if any) by a constant. multiplyMixedAmount :: Quantity -> MixedAmount -> MixedAmount multiplyMixedAmount n = transformMixedAmount (*n) -- | Apply a function to a mixed amount's quantities (and its total prices, if it has any). transformMixedAmount :: (Quantity -> Quantity) -> MixedAmount -> MixedAmount transformMixedAmount f = mapMixedAmountUnsafe (transformAmount f) -- | Calculate the average of some mixed amounts. averageMixedAmounts :: [MixedAmount] -> MixedAmount averageMixedAmounts as = fromIntegral (length as) `divideMixedAmount` maSum as -- | Is this mixed amount negative, if we can tell that unambiguously? -- Ie when normalised, are all individual commodity amounts negative ? isNegativeMixedAmount :: MixedAmount -> Maybe Bool isNegativeMixedAmount m = case amounts $ mixedAmountStripPrices m of [] -> Just False [a] -> Just $ isNegativeAmount a as | all isNegativeAmount as -> Just True as | not (any isNegativeAmount as) -> Just False _ -> Nothing -- multiple amounts with different signs -- | Does this mixed amount appear to be zero when rendered with its display precision? -- i.e. does it have zero quantity with no price, zero quantity with a total price (which is also zero), -- and zero quantity for each unit price? mixedAmountLooksZero :: MixedAmount -> Bool mixedAmountLooksZero (Mixed ma) = all amountLooksZero ma -- | Is this mixed amount exactly zero, ignoring its display precision? -- i.e. does it have zero quantity with no price, zero quantity with a total price (which is also zero), -- and zero quantity for each unit price? mixedAmountIsZero :: MixedAmount -> Bool mixedAmountIsZero (Mixed ma) = all amountIsZero ma -- | Is this mixed amount exactly zero, ignoring its display precision? -- -- A convenient alias for mixedAmountIsZero. maIsZero :: MixedAmount -> Bool maIsZero = mixedAmountIsZero -- | Is this mixed amount non-zero, ignoring its display precision? -- -- A convenient alias for not . mixedAmountIsZero. maIsNonZero :: MixedAmount -> Bool maIsNonZero = not . mixedAmountIsZero -- | Get a mixed amount's component amounts. -- -- * amounts in the same commodity are combined unless they have different prices or total prices -- -- * multiple zero amounts, all with the same non-null commodity, are replaced by just the last of them, preserving the commodity and amount style (all but the last zero amount are discarded) -- -- * multiple zero amounts with multiple commodities, or no commodities, are replaced by one commodity-less zero amount -- -- * an empty amount list is replaced by one commodity-less zero amount -- -- * the special "missing" mixed amount remains unchanged -- amounts :: MixedAmount -> [Amount] amounts (Mixed ma) | isMissingMixedAmount (Mixed ma) = [missingamt] -- missingamt should always be alone, but detect it even if not | M.null nonzeros = [newzero] | otherwise = toList nonzeros where newzero = fromMaybe nullamt $ find (not . T.null . 
acommodity) zeros (zeros, nonzeros) = M.partition amountIsZero ma -- | Get a mixed amount's component amounts without normalising zero and missing -- amounts. This is used for JSON serialisation, so the order is important. In -- particular, we want the Amounts given in the order of the MixedAmountKeys, -- i.e. lexicographically first by commodity, then by price commodity, then by -- unit price from most negative to most positive. amountsRaw :: MixedAmount -> [Amount] amountsRaw (Mixed ma) = toList ma -- | Get this mixed amount's commodities as a set. -- Returns an empty set if there are no amounts. maCommodities :: MixedAmount -> S.Set CommoditySymbol maCommodities = S.fromList . fmap acommodity . amounts' where amounts' ma@(Mixed m) = if M.null m then [] else amounts ma -- | Unify a MixedAmount to a single commodity value if possible. -- This consolidates amounts of the same commodity and discards zero -- amounts; but this one insists on simplifying to a single commodity, -- and will return Nothing if this is not possible. unifyMixedAmount :: MixedAmount -> Maybe Amount unifyMixedAmount = foldM combine 0 . amounts where combine amt result | amountIsZero amt = Just result | amountIsZero result = Just amt | acommodity amt == acommodity result = Just $ amt + result | otherwise = Nothing -- | Sum same-commodity amounts in a lossy way, applying the first -- price to the result and discarding any other prices. Only used as a -- rendering helper. sumSimilarAmountsUsingFirstPrice :: Amount -> Amount -> Amount sumSimilarAmountsUsingFirstPrice a b = (a + b){aprice=p} where p = case (aprice a, aprice b) of (Just (TotalPrice ap), Just (TotalPrice bp)) -> Just . TotalPrice $ ap{aquantity = aquantity ap + aquantity bp } _ -> aprice a -- -- | Sum same-commodity amounts. If there were different prices, set -- -- the price to a special marker indicating "various". Only used as a -- -- rendering helper. -- sumSimilarAmountsNotingPriceDifference :: [Amount] -> Amount -- sumSimilarAmountsNotingPriceDifference [] = nullamt -- sumSimilarAmountsNotingPriceDifference as = undefined -- | Filter a mixed amount's component amounts by a predicate. filterMixedAmount :: (Amount -> Bool) -> MixedAmount -> MixedAmount filterMixedAmount p (Mixed ma) = Mixed $ M.filter p ma -- | Return an unnormalised MixedAmount containing exactly one Amount -- with the specified commodity and the quantity of that commodity -- found in the original. NB if Amount's quantity is zero it will be -- discarded next time the MixedAmount gets normalised. filterMixedAmountByCommodity :: CommoditySymbol -> MixedAmount -> MixedAmount filterMixedAmountByCommodity c (Mixed ma) | M.null ma' = mixedAmount nullamt{acommodity=c} | otherwise = Mixed ma' where ma' = M.filter ((c==) . acommodity) ma -- | Apply a transform to a mixed amount's component 'Amount's. mapMixedAmount :: (Amount -> Amount) -> MixedAmount -> MixedAmount mapMixedAmount f (Mixed ma) = mixed . map f $ toList ma -- | Apply a transform to a mixed amount's component 'Amount's, which does not -- affect the key of the amount (i.e. doesn't change the commodity, price -- commodity, or unit price amount). This condition is not checked. mapMixedAmountUnsafe :: (Amount -> Amount) -> MixedAmount -> MixedAmount mapMixedAmountUnsafe f (Mixed ma) = Mixed $ M.map f ma -- Use M.map instead of fmap to maintain strictness -- | Convert all component amounts to cost/selling price where -- possible (see amountCost). 
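-- For example (illustrative, mirroring the amountCost test cases later in this module):
--   mixedAmountCost (mixedAmount (eur 2){aprice=Just $ UnitPrice $ usd 2})
-- should yield a mixed amount equal to mixedAmount (usd 4), while component
-- amounts that have no price are left unchanged.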
mixedAmountCost :: MixedAmount -> MixedAmount mixedAmountCost (Mixed ma) = foldl' (\m a -> maAddAmount m (amountCost a)) (Mixed noPrices) withPrices where (noPrices, withPrices) = M.partition (isNothing . aprice) ma -- -- | MixedAmount derived Eq instance in Types.hs doesn't know that we -- -- want $0 = EUR0 = 0. Yet we don't want to drag all this code over there. -- -- For now, use this when cross-commodity zero equality is important. -- mixedAmountEquals :: MixedAmount -> MixedAmount -> Bool -- mixedAmountEquals a b = amounts a' == amounts b' || (mixedAmountLooksZero a' && mixedAmountLooksZero b') -- where a' = mixedAmountStripPrices a -- b' = mixedAmountStripPrices b -- | Given a map of standard commodity display styles, apply the -- appropriate one to each individual amount. styleMixedAmount :: M.Map CommoditySymbol AmountStyle -> MixedAmount -> MixedAmount styleMixedAmount styles = mapMixedAmountUnsafe (styleAmount styles) -- | Reset each individual amount's display style to the default. mixedAmountUnstyled :: MixedAmount -> MixedAmount mixedAmountUnstyled = mapMixedAmountUnsafe amountUnstyled -- | Get the string representation of a mixed amount, after -- normalising it to one amount per commodity. Assumes amounts have -- no or similar prices, otherwise this can show misleading prices. -- -- > showMixedAmount = wbUnpack . showMixedAmountB noColour showMixedAmount :: MixedAmount -> String showMixedAmount = wbUnpack . showMixedAmountB noColour -- | Get the one-line string representation of a mixed amount (also showing any costs). -- -- > showMixedAmountOneLine = wbUnpack . showMixedAmountB oneLine showMixedAmountOneLine :: MixedAmount -> String showMixedAmountOneLine = wbUnpack . showMixedAmountB oneLine{displayPrice=True} -- | Like showMixedAmount, but zero amounts are shown with their -- commodity if they have one. -- -- > showMixedAmountWithZeroCommodity = wbUnpack . showMixedAmountB noColour{displayZeroCommodity=True} showMixedAmountWithZeroCommodity :: MixedAmount -> String showMixedAmountWithZeroCommodity = wbUnpack . showMixedAmountB noColour{displayZeroCommodity=True} -- | Get the string representation of a mixed amount, without showing any transaction prices. -- With a True argument, adds ANSI codes to show negative amounts in red. -- -- > showMixedAmountWithoutPrice c = wbUnpack . showMixedAmountB noPrice{displayColour=c} showMixedAmountWithoutPrice :: Bool -> MixedAmount -> String showMixedAmountWithoutPrice c = wbUnpack . showMixedAmountB noPrice{displayColour=c} -- | Get the one-line string representation of a mixed amount, but without -- any \@ prices. -- With a True argument, adds ANSI codes to show negative amounts in red. -- -- > showMixedAmountOneLineWithoutPrice c = wbUnpack . showMixedAmountB oneLine{displayColour=c} showMixedAmountOneLineWithoutPrice :: Bool -> MixedAmount -> String showMixedAmountOneLineWithoutPrice c = wbUnpack . showMixedAmountB oneLine{displayColour=c} -- | Like showMixedAmountOneLineWithoutPrice, but show at most the given width, -- with an elision indicator if there are more. -- With a True argument, adds ANSI codes to show negative amounts in red. -- -- > showMixedAmountElided w c = wbUnpack . showMixedAmountB oneLine{displayColour=c, displayMaxWidth=Just w} showMixedAmountElided :: Int -> Bool -> MixedAmount -> String showMixedAmountElided w c = wbUnpack . showMixedAmountB oneLine{displayColour=c, displayMaxWidth=Just w} -- | Get an unambiguous string representation of a mixed amount for debugging. 
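-- For example (illustrative; the exact field rendering is approximate):
--   showMixedAmountDebug (mixedAmount (usd 1))
-- produces something like
--   Mixed [Amount {acommodity="$", aquantity=1.00, ...}]
-- ie each component Amount rendered unambiguously via showAmountDebug.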
showMixedAmountDebug :: MixedAmount -> String showMixedAmountDebug m | m == missingmixedamt = "(missing)" | otherwise = "Mixed [" ++ as ++ "]" where as = intercalate "\n " $ map showAmountDebug $ amounts m -- | General function to generate a WideBuilder for a MixedAmount, according to the -- supplied AmountDisplayOpts. This is the main function to use for showing -- MixedAmounts, constructing a builder; it can then be converted to a Text with -- wbToText, or to a String with wbUnpack. -- -- If a maximum width is given then: -- - If displayed on one line, it will display as many Amounts as can -- fit in the given width, and further Amounts will be elided. There -- will always be at least one amount displayed, even if this will -- exceed the requested maximum width. -- - If displayed on multiple lines, any Amounts longer than the -- maximum width will be elided. showMixedAmountB :: AmountDisplayOpts -> MixedAmount -> WideBuilder showMixedAmountB opts ma | displayOneLine opts = showMixedAmountOneLineB opts ma | otherwise = WideBuilder (wbBuilder . mconcat $ intersperse sep ls) width where ls = showMixedAmountLinesB opts ma width = headDef 0 $ map wbWidth ls sep = WideBuilder (TB.singleton '\n') 0 -- | Helper for showMixedAmountB to show a list of Amounts on multiple lines. This returns -- the list of WideBuilders: one for each Amount, and padded/elided to the appropriate -- width. This does not honour displayOneLine: all amounts will be displayed as if -- displayOneLine were False. showMixedAmountLinesB :: AmountDisplayOpts -> MixedAmount -> [WideBuilder] showMixedAmountLinesB opts@AmountDisplayOpts{displayMaxWidth=mmax,displayMinWidth=mmin} ma = map (adBuilder . pad) elided where astrs = amtDisplayList (wbWidth sep) (showAmountB opts) . orderedAmounts opts $ if displayPrice opts then ma else mixedAmountStripPrices ma sep = WideBuilder (TB.singleton '\n') 0 width = maximum $ map (wbWidth . adBuilder) elided pad amt | Just mw <- mmin = let w = (max width mw) - wbWidth (adBuilder amt) in amt{ adBuilder = WideBuilder (TB.fromText $ T.replicate w " ") w <> adBuilder amt } | otherwise = amt elided = maybe id elideTo mmax astrs elideTo m xs = maybeAppend elisionStr short where elisionStr = elisionDisplay (Just m) (wbWidth sep) (length long) $ lastDef nullAmountDisplay short (short, long) = partition ((m>=) . wbWidth . adBuilder) xs -- | Helper for showMixedAmountB to deal with single line displays. This does not -- honour displayOneLine: all amounts will be displayed as if displayOneLine -- were True. showMixedAmountOneLineB :: AmountDisplayOpts -> MixedAmount -> WideBuilder showMixedAmountOneLineB opts@AmountDisplayOpts{displayMaxWidth=mmax,displayMinWidth=mmin} ma = WideBuilder (wbBuilder . pad . mconcat . intersperse sep $ map adBuilder elided) . max width $ fromMaybe 0 mmin where width = maybe 0 adTotal $ lastMay elided astrs = amtDisplayList (wbWidth sep) (showAmountB opts) . orderedAmounts opts $ if displayPrice opts then ma else mixedAmountStripPrices ma sep = WideBuilder (TB.fromString ", ") 2 n = length astrs pad = (WideBuilder (TB.fromText $ T.replicate w " ") w <>) where w = fromMaybe 0 mmin - width elided = maybe id elideTo mmax astrs elideTo m = addElide . takeFitting m . withElided -- Add the last elision string to the end of the display list addElide [] = [] addElide xs = maybeAppend (snd $ last xs) $ map fst xs -- Return the elements of the display list which fit within the maximum width -- (including their elision strings). 
Always display at least one amount, -- regardless of width. takeFitting _ [] = [] takeFitting m (x:xs) = x : dropWhileRev (\(a,e) -> m < adTotal (fromMaybe a e)) xs dropWhileRev p = foldr (\x xs -> if null xs && p x then [] else x:xs) [] -- Add the elision strings (if any) to each amount withElided = zipWith (\n2 amt -> (amt, elisionDisplay Nothing (wbWidth sep) n2 amt)) [n-1,n-2..0] orderedAmounts :: AmountDisplayOpts -> MixedAmount -> [Amount] orderedAmounts dopts = maybe id (mapM pad) (displayOrder dopts) . amounts where pad c = fromMaybe (amountWithCommodity c nullamt) . find ((c==) . acommodity) data AmountDisplay = AmountDisplay { adBuilder :: !WideBuilder -- ^ String representation of the Amount , adTotal :: !Int -- ^ Cumulative length of MixedAmount this Amount is part of, -- including separators } deriving (Show) nullAmountDisplay :: AmountDisplay nullAmountDisplay = AmountDisplay mempty 0 amtDisplayList :: Int -> (Amount -> WideBuilder) -> [Amount] -> [AmountDisplay] amtDisplayList sep showamt = snd . mapAccumL display (-sep) where display tot amt = (tot', AmountDisplay str tot') where str = showamt amt tot' = tot + (wbWidth str) + sep -- The string "m more", added to the previous running total elisionDisplay :: Maybe Int -> Int -> Int -> AmountDisplay -> Maybe AmountDisplay elisionDisplay mmax sep n lastAmt | n > 0 = Just $ AmountDisplay (WideBuilder (TB.fromText str) len) (adTotal lastAmt + len) | otherwise = Nothing where fullString = T.pack $ show n ++ " more.." -- sep from the separator, 7 from " more..", numDigits n from number fullLength = sep + 7 + numDigitsInt n str | Just m <- mmax, fullLength > m = T.take (m - 2) fullString <> ".." | otherwise = fullString len = case mmax of Nothing -> fullLength Just m -> max 2 $ min m fullLength maybeAppend :: Maybe a -> [a] -> [a] maybeAppend Nothing = id maybeAppend (Just a) = (++[a]) -- | Set the display precision in the amount's commodities. mixedAmountSetPrecision :: AmountPrecision -> MixedAmount -> MixedAmount mixedAmountSetPrecision p = mapMixedAmountUnsafe (amountSetPrecision p) -- | In each component amount, increase the display precision sufficiently -- to render it exactly (showing all significant decimal digits). mixedAmountSetFullPrecision :: MixedAmount -> MixedAmount mixedAmountSetFullPrecision = mapMixedAmountUnsafe amountSetFullPrecision -- | Remove all prices from a MixedAmount. mixedAmountStripPrices :: MixedAmount -> MixedAmount mixedAmountStripPrices (Mixed ma) = foldl' (\m a -> maAddAmount m a{aprice=Nothing}) (Mixed noPrices) withPrices where (noPrices, withPrices) = M.partition (isNothing . aprice) ma -- | Canonicalise a mixed amount's display styles using the provided commodity style map. 
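-- For example (illustrative; the AmountStyle constructor fields are assumed here
-- to be symbol side, spacing, precision, decimal mark, digit groups):
--   let styles = M.fromList [("$", AmountStyle L False (Precision 2) (Just '.') Nothing)]
--   in canonicaliseMixedAmount styles ma
-- re-styles every $ amount in ma to that display style, and leaves amounts in
-- commodities not present in the map unchanged.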
canonicaliseMixedAmount :: M.Map CommoditySymbol AmountStyle -> MixedAmount -> MixedAmount canonicaliseMixedAmount styles = mapMixedAmountUnsafe (canonicaliseAmount styles) ------------------------------------------------------------------------------- -- tests tests_Amount = testGroup "Amount" [ testGroup "Amount" [ testCase "amountCost" $ do amountCost (eur 1) @?= eur 1 amountCost (eur 2){aprice=Just $ UnitPrice $ usd 2} @?= usd 4 amountCost (eur 1){aprice=Just $ TotalPrice $ usd 2} @?= usd 2 amountCost (eur (-1)){aprice=Just $ TotalPrice $ usd (-2)} @?= usd (-2) ,testCase "amountLooksZero" $ do assertBool "" $ amountLooksZero nullamt assertBool "" $ amountLooksZero $ usd 0 ,testCase "negating amounts" $ do negate (usd 1) @?= (usd 1){aquantity= -1} let b = (usd 1){aprice=Just $ UnitPrice $ eur 2} in negate b @?= b{aquantity= -1} ,testCase "adding amounts without prices" $ do (usd 1.23 + usd (-1.23)) @?= usd 0 (usd 1.23 + usd (-1.23)) @?= usd 0 (usd (-1.23) + usd (-1.23)) @?= usd (-2.46) sum [usd 1.23,usd (-1.23),usd (-1.23),-(usd (-1.23))] @?= usd 0 -- highest precision is preserved asprecision (astyle $ sum [usd 1 `withPrecision` Precision 1, usd 1 `withPrecision` Precision 3]) @?= Precision 3 asprecision (astyle $ sum [usd 1 `withPrecision` Precision 3, usd 1 `withPrecision` Precision 1]) @?= Precision 3 -- adding different commodities assumes conversion rate 1 assertBool "" $ amountLooksZero (usd 1.23 - eur 1.23) ,testCase "showAmount" $ do showAmount (usd 0 + gbp 0) @?= "0" ] ,testGroup "MixedAmount" [ testCase "comparing mixed amounts compares based on quantities" $ do let usdpos = mixed [usd 1] usdneg = mixed [usd (-1)] eurneg = mixed [eur (-12)] compare usdneg usdpos @?= LT compare eurneg usdpos @?= LT ,testCase "adding mixed amounts to zero, the commodity and amount style are preserved" $ maSum (map mixedAmount [usd 1.25 ,usd (-1) `withPrecision` Precision 3 ,usd (-0.25) ]) @?= mixedAmount (usd 0 `withPrecision` Precision 3) ,testCase "adding mixed amounts with total prices" $ do maSum (map mixedAmount [usd 1 @@ eur 1 ,usd (-2) @@ eur 1 ]) @?= mixedAmount (usd (-1) @@ eur 2) ,testCase "showMixedAmount" $ do showMixedAmount (mixedAmount (usd 1)) @?= "$1.00" showMixedAmount (mixedAmount (usd 1 `at` eur 2)) @?= "$1.00 @ €2.00" showMixedAmount (mixedAmount (usd 0)) @?= "0" showMixedAmount nullmixedamt @?= "0" showMixedAmount missingmixedamt @?= "" ,testCase "showMixedAmountWithoutPrice" $ do let a = usd 1 `at` eur 2 showMixedAmountWithoutPrice False (mixedAmount (a)) @?= "$1.00" showMixedAmountWithoutPrice False (mixed [a, -a]) @?= "0" ,testGroup "amounts" [ testCase "a missing amount overrides any other amounts" $ amounts (mixed [usd 1, missingamt]) @?= [missingamt] ,testCase "unpriced same-commodity amounts are combined" $ amounts (mixed [usd 0, usd 2]) @?= [usd 2] ,testCase "amounts with same unit price are combined" $ amounts (mixed [usd 1 `at` eur 1, usd 1 `at` eur 1]) @?= [usd 2 `at` eur 1] ,testCase "amounts with different unit prices are not combined" $ amounts (mixed [usd 1 `at` eur 1, usd 1 `at` eur 2]) @?= [usd 1 `at` eur 1, usd 1 `at` eur 2] ,testCase "amounts with total prices are combined" $ amounts (mixed [usd 1 @@ eur 1, usd 1 @@ eur 1]) @?= [usd 2 @@ eur 2] ] ,testCase "mixedAmountStripPrices" $ do amounts (mixedAmountStripPrices nullmixedamt) @?= [nullamt] assertBool "" $ mixedAmountLooksZero $ mixedAmountStripPrices (mixed [usd 10 ,usd 10 @@ eur 7 ,usd (-10) ,usd (-10) @@ eur (-7) ]) ] ] 
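-- A minimal usage sketch (illustrative only, not part of the upstream module);
-- the underscore prefix avoids unused-binding warnings. It assumes the helpers
-- defined above: mixed, usd, eur, at, showMixedAmountOneLine.
_exampleMixedAmountUsage :: String
_exampleMixedAmountUsage =
  showMixedAmountOneLine $ mixed [usd 10, eur 5 `at` usd 1.10]
  -- expected to render roughly as "$10.00, €5.00 @ $1.10"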
hledger-lib-1.30/Hledger/Data/Balancing.hs0000644000000000000000000013375714435646037016472 0ustar0000000000000000{-| Functions for ensuring transactions and journals are balanced. -} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PackageImports #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TemplateHaskell #-} module Hledger.Data.Balancing ( -- * BalancingOpts BalancingOpts(..) , HasBalancingOpts(..) , defbalancingopts -- * transaction balancing , isTransactionBalanced , balanceTransaction , balanceTransactionHelper -- * journal balancing , journalBalanceTransactions , journalCheckBalanceAssertions -- * tests , tests_Balancing ) where import Control.Monad (forM, forM_, when, unless) import Control.Monad.Except (ExceptT(..), runExceptT, throwError) import "extra" Control.Monad.Extra (whenM) import Control.Monad.Reader as R (ReaderT, reader, runReaderT, ask, asks) import Control.Monad.ST (ST, runST) import Control.Monad.Trans.Class (lift) import Data.Array.ST (STArray, getElems, newListArray, writeArray) import Data.Foldable (asum) import Data.Function ((&)) import Data.Functor ((<&>)) import "base-compat" Data.Functor.Compat (void) import qualified Data.HashTable.Class as H (toList) import qualified Data.HashTable.ST.Cuckoo as H import Data.List (partition, sortOn) import Data.List.Extra (nubSort) import Data.Maybe (fromJust, fromMaybe, isJust, isNothing, mapMaybe) import qualified Data.Set as S import qualified Data.Text as T import Data.Time.Calendar (fromGregorian) import qualified Data.Map as M import Safe (headDef) import Text.Printf (printf) import Hledger.Utils import Hledger.Data.Types import Hledger.Data.AccountName (isAccountNamePrefixOf) import Hledger.Data.Amount import Hledger.Data.Journal import Hledger.Data.Posting import Hledger.Data.Transaction import Hledger.Data.Errors data BalancingOpts = BalancingOpts { ignore_assertions_ :: Bool -- ^ should failing balance assertions be ignored ? , infer_balancing_costs_ :: Bool -- ^ Are we permitted to infer missing costs to balance transactions ? -- Distinct from InputOpts{infer_costs_}. , commodity_styles_ :: Maybe (M.Map CommoditySymbol AmountStyle) -- ^ commodity display styles } deriving (Show) defbalancingopts :: BalancingOpts defbalancingopts = BalancingOpts { ignore_assertions_ = False , infer_balancing_costs_ = True , commodity_styles_ = Nothing } -- | Check that this transaction would appear balanced to a human when displayed. -- On success, returns the empty list, otherwise one or more error messages. -- -- In more detail: -- For the real postings, and separately for the balanced virtual postings: -- -- 1. Convert amounts to cost where possible -- -- 2. When there are two or more non-zero amounts -- (appearing non-zero when displayed, using the given display styles if provided), -- are they a mix of positives and negatives ? -- This is checked separately to give a clearer error message. -- (Best effort; could be confused by postings with multicommodity amounts.) -- -- 3. Does the amounts' sum appear non-zero when displayed ? 
-- (using the given display styles if provided) -- transactionCheckBalanced :: BalancingOpts -> Transaction -> [String] transactionCheckBalanced BalancingOpts{commodity_styles_} t = errs where (rps, bvps) = foldr partitionPosting ([], []) $ tpostings t where partitionPosting p ~(l, r) = case ptype p of RegularPosting -> (p:l, r) BalancedVirtualPosting -> (l, p:r) VirtualPosting -> (l, r) -- check for mixed signs, detecting nonzeros at display precision canonicalise = maybe id canonicaliseMixedAmount commodity_styles_ postingBalancingAmount p | "_price-matched" `elem` map fst (ptags p) = mixedAmountStripPrices $ pamount p | otherwise = mixedAmountCost $ pamount p signsOk ps = case filter (not.mixedAmountLooksZero) $ map (canonicalise.postingBalancingAmount) ps of nonzeros | length nonzeros >= 2 -> length (nubSort $ mapMaybe isNegativeMixedAmount nonzeros) > 1 _ -> True (rsignsok, bvsignsok) = (signsOk rps, signsOk bvps) -- check for zero sum, at display precision (rsumcost, bvsumcost) = (foldMap postingBalancingAmount rps, foldMap postingBalancingAmount bvps) (rsumdisplay, bvsumdisplay) = (canonicalise rsumcost, canonicalise bvsumcost) (rsumok, bvsumok) = (mixedAmountLooksZero rsumdisplay, mixedAmountLooksZero bvsumdisplay) -- generate error messages, showing amounts with their original precision errs = filter (not.null) [rmsg, bvmsg] where rmsg | rsumok = "" | not rsignsok = "The real postings all have the same sign. Consider negating some of them." | otherwise = "The real postings' sum should be 0 but is: " ++ showMixedAmountOneLineWithoutPrice False rsumcost bvmsg | bvsumok = "" | not bvsignsok = "The balanced virtual postings all have the same sign. Consider negating some of them." | otherwise = "The balanced virtual postings' sum should be 0 but is: " ++ showMixedAmountOneLineWithoutPrice False bvsumcost -- | Legacy form of transactionCheckBalanced. isTransactionBalanced :: BalancingOpts -> Transaction -> Bool isTransactionBalanced bopts = null . transactionCheckBalanced bopts -- | Balance this transaction, ensuring that its postings -- (and its balanced virtual postings) sum to 0, -- by inferring a missing amount or conversion price(s) if needed. -- Or if balancing is not possible, because the amounts don't sum to 0 or -- because there's more than one missing amount, return an error message. -- -- Transactions with balance assignments can have more than one -- missing amount; to balance those you should use the more powerful -- journalBalanceTransactions. -- -- The "sum to 0" test is done using commodity display precisions, -- if provided, so that the result agrees with the numbers users can see. -- balanceTransaction :: BalancingOpts -> Transaction -> Either String Transaction balanceTransaction bopts = fmap fst . balanceTransactionHelper bopts -- | Helper used by balanceTransaction and balanceTransactionWithBalanceAssignmentAndCheckAssertionsB; -- use one of those instead. -- It also returns a list of accounts and amounts that were inferred. 
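-- For example (illustrative, mirroring the balanceTransaction tests at the end
-- of this module): in an entry with postings
--   a   $1.00
--   b             ; no amount
-- the missing amount is inferred as $-1.00 for b; and in an entry with postings
--   a   $1.35
--   b   €-1.00
-- a conversion cost is inferred on the first posting, giving  a  $1.35 @@ €1.00.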
balanceTransactionHelper :: BalancingOpts -> Transaction -> Either String (Transaction, [(AccountName, MixedAmount)]) balanceTransactionHelper bopts t = do (t', inferredamtsandaccts) <- transactionInferBalancingAmount (fromMaybe M.empty $ commodity_styles_ bopts) $ (if infer_balancing_costs_ bopts then transactionInferBalancingCosts else id) t case transactionCheckBalanced bopts t' of [] -> Right (txnTieKnot t', inferredamtsandaccts) errs -> Left $ transactionBalanceError t' errs' where ismulticommodity = (length $ transactionCommodities t') > 1 errs' = [ "Automatic commodity conversion is not enabled." | ismulticommodity && not (infer_balancing_costs_ bopts) ] ++ errs ++ if ismulticommodity then [ "Consider adjusting this entry's amounts, adding missing postings," , "or recording conversion price(s) with @, @@ or equity postings." ] else [ "Consider adjusting this entry's amounts, or adding missing postings." ] transactionCommodities :: Transaction -> S.Set CommoditySymbol transactionCommodities t = mconcat $ map (maCommodities . pamount) $ tpostings t -- | Generate a transaction balancing error message, given the transaction -- and one or more suberror messages. transactionBalanceError :: Transaction -> [String] -> String transactionBalanceError t errs = printf "%s:\n%s\n\nThis %stransaction is unbalanced.\n%s" (sourcePosPairPretty $ tsourcepos t) (textChomp ex) (if ismulticommodity then "multi-commodity " else "" :: String) (chomp $ unlines errs) where ismulticommodity = (length $ transactionCommodities t) > 1 (_f,_l,_mcols,ex) = makeTransactionErrorExcerpt t finderrcols where finderrcols _ = Nothing -- finderrcols t = Just (1, Just w) -- where -- w = maximumDef 1 $ map T.length $ T.lines $ showTransaction t -- | Infer up to one missing amount for this transactions's real postings, and -- likewise for its balanced virtual postings, if needed; or return an error -- message if we can't. Returns the updated transaction and any inferred posting amounts, -- with the corresponding accounts, in order). -- -- We can infer a missing amount when there are multiple postings and exactly -- one of them is amountless. If the amounts had price(s) the inferred amount -- have the same price(s), and will be converted to the price commodity. transactionInferBalancingAmount :: M.Map CommoditySymbol AmountStyle -- ^ commodity display styles -> Transaction -> Either String (Transaction, [(AccountName, MixedAmount)]) transactionInferBalancingAmount styles t@Transaction{tpostings=ps} | length amountlessrealps > 1 = Left $ transactionBalanceError t ["There can't be more than one real posting with no amount." ,"(Remember to put two or more spaces between account and amount.)"] | length amountlessbvps > 1 = Left $ transactionBalanceError t ["There can't be more than one balanced virtual posting with no amount." 
,"(Remember to put two or more spaces between account and amount.)"] | otherwise = let psandinferredamts = map inferamount ps inferredacctsandamts = [(paccount p, amt) | (p, Just amt) <- psandinferredamts] in Right (t{tpostings=map fst psandinferredamts}, inferredacctsandamts) where (amountfulrealps, amountlessrealps) = partition hasAmount (realPostings t) realsum = sumPostings amountfulrealps (amountfulbvps, amountlessbvps) = partition hasAmount (balancedVirtualPostings t) bvsum = sumPostings amountfulbvps inferamount :: Posting -> (Posting, Maybe MixedAmount) inferamount p = let minferredamt = case ptype p of RegularPosting | not (hasAmount p) -> Just realsum BalancedVirtualPosting | not (hasAmount p) -> Just bvsum VirtualPosting | not (hasAmount p) -> Just 0 _ -> Nothing in case minferredamt of Nothing -> (p, Nothing) Just a -> (p{pamount=a', poriginal=Just $ originalPosting p}, Just a') where -- Inferred amounts are converted to cost. -- Also ensure the new amount has the standard style for its commodity -- (since the main amount styling pass happened before this balancing pass); a' = styleMixedAmount styles . mixedAmountCost $ maNegate a -- | Infer costs for this transaction's posting amounts, if needed to make -- the postings balance, and if permitted. This is done once for the real -- postings and again (separately) for the balanced virtual postings. When -- it's not possible, the transaction is left unchanged. -- -- The simplest example is a transaction with two postings, each in a -- different commodity, with no costs specified. In this case we'll add a -- cost to the first posting such that it can be converted to the commodity -- of the second posting (with -B), and such that the postings balance. -- -- In general, we can infer a cost (conversion rate) when the sum of posting amounts -- contains exactly two different commodities and no explicit costs. Also -- all postings are expected to contain an explicit amount (no missing -- amounts) in a single commodity. Otherwise no cost inferring is attempted. -- -- The transaction itself could contain more than two commodities, and/or -- costs, if they cancel out; what matters is that the sum of posting amounts -- contains exactly two commodities and zero costs. -- -- There can also be more than two postings in either of the commodities. -- -- We want to avoid excessive display of digits when the calculated cost is -- an irrational number, while hopefully also ensuring the displayed numbers -- make sense if the user does a manual calculation. This is (mostly) achieved -- in two ways: -- -- - when there is only one posting in the "from" commodity, a total cost -- (@@) is used, and all available decimal digits are shown -- -- - otherwise, a suitable averaged unit cost (@) is applied to the relevant -- postings, with display precision equal to the summed display precisions -- of the two commodities being converted between, or 2, whichever is larger. -- -- (We don't always calculate a good-looking display precision for unit costs -- when the commodity display precisions are low, eg when a journal doesn't -- use any decimal places. The minimum of 2 helps make the costs shown by the -- print command a bit less surprising in this case. Could do better.) -- transactionInferBalancingCosts :: Transaction -> Transaction transactionInferBalancingCosts t@Transaction{tpostings=ps} = t{tpostings=ps'} where ps' = map (costInferrerFor t BalancedVirtualPosting . 
costInferrerFor t RegularPosting) ps -- | Generate a posting update function which assigns a suitable cost to -- balance the posting, if and as appropriate for the given transaction and -- posting type (real or balanced virtual) (or if we cannot or should not infer -- costs, leaves the posting unchanged). costInferrerFor :: Transaction -> PostingType -> (Posting -> Posting) costInferrerFor t pt = maybe id infercost inferFromAndTo where postings = filter ((==pt).ptype) $ tpostings t pcommodities = map acommodity $ concatMap (amounts . pamount) postings sumamounts = amounts $ sumPostings postings -- amounts normalises to one amount per commodity & price -- We can infer prices if there are no prices given, exactly two commodities in the normalised -- sum of postings in this transaction, and these two have opposite signs. The amount we are -- converting from is the first commodity to appear in the ordered list of postings, and the -- commodity we are converting to is the other. If we cannot infer prices, return Nothing. inferFromAndTo = case sumamounts of [a,b] | noprices, oppositesigns -> asum $ map orderIfMatches pcommodities where noprices = all (isNothing . aprice) sumamounts oppositesigns = signum (aquantity a) /= signum (aquantity b) orderIfMatches x | x == acommodity a = Just (a,b) | x == acommodity b = Just (b,a) | otherwise = Nothing _ -> Nothing -- For each posting, if the posting type matches, there is only a single amount in the posting, -- and the commodity of the amount matches the amount we're converting from, -- then set its cost based on the ratio between fromamount and toamount. infercost (fromamount, toamount) p | [a] <- amounts (pamount p), ptype p == pt, acommodity a == acommodity fromamount = p{ pamount = mixedAmount a{aprice=Just conversionprice} , poriginal = Just $ originalPosting p } | otherwise = p where -- If only one Amount in the posting list matches fromamount we can use TotalPrice. -- Otherwise divide the conversion equally among the Amounts by using a unit price. conversionprice = case filter (== acommodity fromamount) pcommodities of [_] -> TotalPrice $ negate toamount _ -> UnitPrice $ negate unitprice `withPrecision` unitprecision unitprice = aquantity fromamount `divideAmount` toamount unitprecision = case (asprecision $ astyle fromamount, asprecision $ astyle toamount) of (Precision a, Precision b) -> Precision . max 2 $ saturatedAdd a b _ -> NaturalPrecision saturatedAdd a b = if maxBound - a < b then maxBound else a + b -- | Check any balance assertions in the journal and return an error message -- if any of them fail (or if the transaction balancing they require fails). journalCheckBalanceAssertions :: Journal -> Maybe String journalCheckBalanceAssertions = either Just (const Nothing) . journalBalanceTransactions defbalancingopts -- "Transaction balancing", including: inferring missing amounts, -- applying balance assignments, checking transaction balancedness, -- checking balance assertions, respecting posting dates. These things -- are all interdependent. -- WARN tricky algorithm and code ahead. 
-- -- Code overview as of 20190219, this could/should be simplified/documented more: -- parseAndFinaliseJournal['] (Cli/Utils.hs), journalAddForecast (Common.hs), journalAddBudgetGoalTransactions (BudgetReport.hs), tests (BalanceReport.hs) -- journalBalanceTransactions -- runST -- runExceptT -- balanceTransaction (Transaction.hs) -- balanceTransactionHelper -- runReaderT -- balanceTransactionAndCheckAssertionsB -- addAmountAndCheckAssertionB -- addOrAssignAmountAndCheckAssertionB -- balanceTransactionHelper (Transaction.hs) -- uiCheckBalanceAssertions d ui@UIState{aopts=UIOpts{cliopts_=copts}, ajournal=j} (ErrorScreen.hs) -- journalCheckBalanceAssertions -- journalBalanceTransactions -- transactionWizard, postingsBalanced (Add.hs), tests (Transaction.hs) -- balanceTransaction (Transaction.hs) XXX hledger add won't allow balance assignments + missing amount ? -- | Monad used for statefully balancing/amount-inferring/assertion-checking -- a sequence of transactions. -- Perhaps can be simplified, or would a different ordering of layers make sense ? -- If you see a way, let us know. type Balancing s = ReaderT (BalancingState s) (ExceptT String (ST s)) -- | The state used while balancing a sequence of transactions. data BalancingState s = BalancingState { -- read only bsStyles :: Maybe (M.Map CommoditySymbol AmountStyle) -- ^ commodity display styles ,bsUnassignable :: S.Set AccountName -- ^ accounts where balance assignments may not be used (because of auto posting rules) ,bsAssrt :: Bool -- ^ whether to check balance assertions -- mutable ,bsBalances :: H.HashTable s AccountName MixedAmount -- ^ running account balances, initially empty ,bsTransactions :: STArray s Integer Transaction -- ^ a mutable array of the transactions being balanced -- (for efficiency ? journalBalanceTransactions says: not strictly necessary but avoids a sort at the end I think) } -- | Access the current balancing state, and possibly modify the mutable bits, -- lifting through the Except and Reader layers into the Balancing monad. withRunningBalance :: (BalancingState s -> ST s a) -> Balancing s a withRunningBalance f = ask >>= lift . lift . f -- | Get this account's current exclusive running balance. getRunningBalanceB :: AccountName -> Balancing s MixedAmount getRunningBalanceB acc = withRunningBalance $ \BalancingState{bsBalances} -> do fromMaybe nullmixedamt <$> H.lookup bsBalances acc -- | Add this amount to this account's exclusive running balance. -- Returns the new running balance. addToRunningBalanceB :: AccountName -> MixedAmount -> Balancing s MixedAmount addToRunningBalanceB acc amt = withRunningBalance $ \BalancingState{bsBalances} -> do old <- fromMaybe nullmixedamt <$> H.lookup bsBalances acc let new = maPlus old amt H.insert bsBalances acc new return new -- | Set this account's exclusive running balance to this amount. -- Returns the change in exclusive running balance. setRunningBalanceB :: AccountName -> MixedAmount -> Balancing s MixedAmount setRunningBalanceB acc amt = withRunningBalance $ \BalancingState{bsBalances} -> do old <- fromMaybe nullmixedamt <$> H.lookup bsBalances acc H.insert bsBalances acc amt return $ maMinus amt old -- | Set this account's exclusive running balance to whatever amount -- makes its *inclusive* running balance (the sum of exclusive running -- balances of this account and any subaccounts) be the given amount. -- Returns the change in exclusive running balance. 
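-- A small worked example (illustrative): if the running balance of assets:cash
-- is $5 and that of its subaccount assets:cash:petty is $2 (so its inclusive
-- balance is $7), then setting the inclusive balance of assets:cash to $10 sets
-- its exclusive balance to $10 - $2 = $8 and returns the change, $8 - $5 = $3.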
setInclusiveRunningBalanceB :: AccountName -> MixedAmount -> Balancing s MixedAmount setInclusiveRunningBalanceB acc newibal = withRunningBalance $ \BalancingState{bsBalances} -> do oldebal <- fromMaybe nullmixedamt <$> H.lookup bsBalances acc allebals <- H.toList bsBalances let subsibal = -- sum of any subaccounts' running balances maSum . map snd $ filter ((acc `isAccountNamePrefixOf`).fst) allebals let newebal = maMinus newibal subsibal H.insert bsBalances acc newebal return $ maMinus newebal oldebal -- | Update (overwrite) this transaction in the balancing state. updateTransactionB :: Transaction -> Balancing s () updateTransactionB t = withRunningBalance $ \BalancingState{bsTransactions} -> void $ writeArray bsTransactions (tindex t) t -- | Infer any missing amounts and/or conversion costs -- (as needed to balance transactions and satisfy balance assignments); -- and check that all transactions are balanced; -- and (optional) check that all balance assertions pass. -- Or, return an error message (just the first error encountered). -- -- Assumes journalInferCommodityStyles has been called, since those -- affect transaction balancing. -- -- This does multiple things at once because amount inferring, balance -- assignments, balance assertions and posting dates are interdependent. journalBalanceTransactions :: BalancingOpts -> Journal -> Either String Journal journalBalanceTransactions bopts' j' = let -- ensure transactions are numbered, so we can store them by number j@Journal{jtxns=ts} = journalNumberTransactions j' -- display precisions used in balanced checking styles = Just $ journalCommodityStyles j bopts = bopts'{commodity_styles_=styles} -- balance assignments are not allowed on accounts affected by auto postings autopostingaccts = S.fromList . map (paccount . tmprPosting) . concatMap tmpostingrules $ jtxnmodifiers j in -- Store the transactions in a mutable array, which we'll update as we balance them. -- Not strictly necessary but avoids a sort at the end I think. runST $ do balancedtxns <- newListArray (1, toInteger $ length ts) ts -- Process all transactions, or short-circuit with an error. runExceptT $ do -- Two passes are required: -- 1. Step through the transactions, balancing the ones which don't have balance assignments, -- postponing those which do until later. The balanced ones are split into their postings, -- keeping these and the not-yet-balanced transactions in the same relative order. psandts :: [Either Posting Transaction] <- fmap concat $ forM ts $ \case t | null $ assignmentPostings t -> case balanceTransaction bopts t of Left e -> throwError e Right t' -> do lift $ writeArray balancedtxns (tindex t') t' return $ map Left $ tpostings t' t -> return [Right t] -- 2. Step through these items in date order (and preserved same-day order), -- keeping running balances for all accounts. runningbals <- lift $ H.newSized (length $ journalAccountNamesUsed j) flip runReaderT (BalancingState styles autopostingaccts (not $ ignore_assertions_ bopts) runningbals balancedtxns) $ do -- On encountering any not-yet-balanced transaction with a balance assignment, -- enact the balance assignment then finish balancing the transaction. -- And, check any balance assertions encountered along the way. void $ mapM' balanceTransactionAndCheckAssertionsB $ sortOn (either postingDate tdate) psandts -- Return the now fully-balanced and checked transactions. 
ts' <- lift $ getElems balancedtxns return j{jtxns=ts'} -- | This function is called statefully on each of a date-ordered sequence of -- 1. fully explicit postings from already-balanced transactions and -- 2. not-yet-balanced transactions containing balance assignments. -- It executes balance assignments and finishes balancing the transactions, -- and checks balance assertions on each posting as it goes. -- An error will be thrown if a transaction can't be balanced -- or if an illegal balance assignment is found (cf checkIllegalBalanceAssignment). -- Transaction prices are removed, which helps eg balance-assertions.test: 15. Mix different commodities and assignments. -- This stores the balanced transactions in case 2 but not in case 1. balanceTransactionAndCheckAssertionsB :: Either Posting Transaction -> Balancing s () balanceTransactionAndCheckAssertionsB (Left p@Posting{}) = -- update the account's running balance and check the balance assertion if any void . addAmountAndCheckAssertionB $ postingStripPrices p balanceTransactionAndCheckAssertionsB (Right t@Transaction{tpostings=ps}) = do -- make sure we can handle the balance assignments mapM_ checkIllegalBalanceAssignmentB ps -- for each posting, in date order (though without disturbing their display order), -- 1. infer its amount from the balance assignment if applicable, -- 2. update the account's running balance, and -- 3. check the balance assertion if any. ps' <- ps & zip [1..] -- attach original positions & sortOn (postingDate.snd) -- sort by date & mapM (addOrAssignAmountAndCheckAssertionB) -- infer amount, check assertion on each one <&> sortOn fst -- restore original order <&> map snd -- discard positions -- infer any remaining missing amounts, and make sure the transaction is now fully balanced styles <- R.reader bsStyles case balanceTransactionHelper defbalancingopts{commodity_styles_=styles} t{tpostings=ps'} of Left err -> throwError err Right (t', inferredacctsandamts) -> do -- for each amount just inferred, update the running balance mapM_ (uncurry addToRunningBalanceB) inferredacctsandamts -- and save the balanced transaction. updateTransactionB t' type NumberedPosting = (Integer, Posting) -- | If this posting has an explicit amount, add it to the account's running balance. -- If it has a missing amount and a balance assignment, infer the amount from, and -- reset the running balance to, the assigned balance. -- If it has a missing amount and no balance assignment, leave it for later. -- Then test the balance assertion if any. addOrAssignAmountAndCheckAssertionB :: NumberedPosting -> Balancing s NumberedPosting addOrAssignAmountAndCheckAssertionB (i,p@Posting{paccount=acc, pamount=amt, pbalanceassertion=mba}) -- an explicit posting amount | hasAmount p = do newbal <- addToRunningBalanceB acc amt whenM (R.reader bsAssrt) $ checkBalanceAssertionB p newbal return (i,p) -- no explicit posting amount, but there is a balance assignment | Just BalanceAssertion{baamount,batotal,bainclusive} <- mba = do newbal <- if batotal -- a total balance assignment (==, all commodities) then return $ mixedAmount baamount -- a partial balance assignment (=, one commodity) else do oldbalothercommodities <- filterMixedAmount ((acommodity baamount /=) . acommodity) <$> getRunningBalanceB acc return $ maAddAmount oldbalothercommodities baamount diff <- (if bainclusive then setInclusiveRunningBalanceB else setRunningBalanceB) acc newbal let p' = p{pamount=filterMixedAmount (not . 
amountIsZero) diff, poriginal=Just $ originalPosting p} whenM (R.reader bsAssrt) $ checkBalanceAssertionB p' newbal return (i,p') -- no explicit posting amount, no balance assignment | otherwise = return (i,p) -- | Add the posting's amount to its account's running balance, and -- optionally check the posting's balance assertion if any. -- The posting is expected to have an explicit amount (otherwise this does nothing). -- Adding and checking balance assertions are tightly paired because we -- need to see the balance as it stands after each individual posting. addAmountAndCheckAssertionB :: Posting -> Balancing s Posting addAmountAndCheckAssertionB p | hasAmount p = do newbal <- addToRunningBalanceB (paccount p) $ pamount p whenM (R.reader bsAssrt) $ checkBalanceAssertionB p newbal return p addAmountAndCheckAssertionB p = return p -- | Check a posting's balance assertion against the given actual balance, and -- return an error if the assertion is not satisfied. -- If the assertion is partial, unasserted commodities in the actual balance -- are ignored; if it is total, they will cause the assertion to fail. checkBalanceAssertionB :: Posting -> MixedAmount -> Balancing s () checkBalanceAssertionB p@Posting{pbalanceassertion=Just (BalanceAssertion{baamount,batotal})} actualbal = forM_ (baamount : otheramts) $ \amt -> checkBalanceAssertionOneCommodityB p amt actualbal where assertedcomm = acommodity baamount otheramts | batotal = map (\a -> a{aquantity=0}) . amountsRaw $ filterMixedAmount ((/=assertedcomm).acommodity) actualbal | otherwise = [] checkBalanceAssertionB _ _ = return () -- | Does this (single commodity) expected balance match the amount of that -- commodity in the given (multicommodity) actual balance ? If not, returns a -- balance assertion failure message based on the provided posting. To match, -- the amounts must be exactly equal (display precision is ignored here). -- If the assertion is inclusive, the expected amount is compared with the account's -- subaccount-inclusive balance; otherwise, with the subaccount-exclusive balance. checkBalanceAssertionOneCommodityB :: Posting -> Amount -> MixedAmount -> Balancing s () checkBalanceAssertionOneCommodityB p@Posting{paccount=assertedacct} assertedamt actualbal = do let isinclusive = maybe False bainclusive $ pbalanceassertion p let istotal = maybe False batotal $ pbalanceassertion p actualbal' <- if isinclusive then -- sum the running balances of this account and any of its subaccounts seen so far withRunningBalance $ \BalancingState{bsBalances} -> H.foldM (\ibal (acc, amt) -> return $ if assertedacct==acc || assertedacct `isAccountNamePrefixOf` acc then maPlus ibal amt else ibal) nullmixedamt bsBalances else return actualbal let assertedcomm = acommodity assertedamt actualbalincomm = headDef nullamt . amountsRaw . filterMixedAmountByCommodity assertedcomm $ actualbal' pass = aquantity -- traceWith (("asserted:"++).showAmountDebug) assertedamt == aquantity -- traceWith (("actual:"++).showAmountDebug) actualbalincomm errmsg = chomp $ printf (unlines [ "%s:", "%s\n", "This balance assertion failed.", -- "date: %s", "In account: %s", "and commodity: %s", -- "display precision: %d", "this balance was asserted: %s", -- (at display precision: %s)", "but the calculated balance is: %s", -- (at display precision: %s)", "a difference of: %s", "", "Consider viewing this account's calculated balances to troubleshoot. 
Eg:", "", "hledger reg '%s'%s -I # -f FILE" ]) (sourcePosPretty pos) (textChomp ex) -- (showDate $ postingDate p) (if isinclusive then printf "%-30s (including subaccounts)" acct else acct) (if istotal then printf "%-30s (no other commodities allowed)" (T.unpack assertedcomm) else (T.unpack assertedcomm)) -- (asprecision $ astyle actualbalincommodity) -- should be the standard display precision I think (show $ aquantity assertedamt) -- (showAmount assertedamt) (show $ aquantity actualbalincomm) -- (showAmount actualbalincommodity) (show $ aquantity assertedamt - aquantity actualbalincomm) (acct ++ if isinclusive then "" else "$") (if istotal then "" else (" cur:" ++ quoteForCommandLine (T.unpack assertedcomm))) where acct = T.unpack $ paccount p ass = fromJust $ pbalanceassertion p -- PARTIAL: fromJust won't fail, there is a balance assertion pos = baposition ass (_,_,_,ex) = makeBalanceAssertionErrorExcerpt p unless pass $ throwError errmsg -- | Throw an error if this posting is trying to do an illegal balance assignment. checkIllegalBalanceAssignmentB :: Posting -> Balancing s () checkIllegalBalanceAssignmentB p = do checkBalanceAssignmentPostingDateB p checkBalanceAssignmentUnassignableAccountB p -- XXX these should show position. annotateErrorWithTransaction t ? -- | Throw an error if this posting is trying to do a balance assignment and -- has a custom posting date (which makes amount inference too hard/impossible). checkBalanceAssignmentPostingDateB :: Posting -> Balancing s () checkBalanceAssignmentPostingDateB p = when (hasBalanceAssignment p && isJust (pdate p)) $ throwError $ chomp $ unlines [ "Balance assignments and custom posting dates may not be combined." ,"" ,chomp1 $ T.unpack $ maybe (T.unlines $ showPostingLines p) showTransaction $ ptransaction p ,"Balance assignments may not be used on postings with a custom posting date" ,"(it makes balancing the journal impossible)." ,"Please write the posting amount explicitly (or remove the posting date)." ] -- | Throw an error if this posting is trying to do a balance assignment and -- the account does not allow balance assignments (eg because it is referenced -- by an auto posting rule, which might generate additional postings to it). checkBalanceAssignmentUnassignableAccountB :: Posting -> Balancing s () checkBalanceAssignmentUnassignableAccountB p = do unassignable <- R.asks bsUnassignable when (hasBalanceAssignment p && paccount p `S.member` unassignable) $ throwError $ chomp $ unlines [ "Balance assignments and auto postings may not be combined." ,"" ,chomp1 $ T.unpack $ maybe (T.unlines $ showPostingLines p) (showTransaction) $ ptransaction p ,"Balance assignments may not be used on accounts affected by auto posting rules" ,"(it makes balancing the journal impossible)." ,"Please write the posting amount explicitly (or remove the auto posting rule(s))." 
] -- lenses makeHledgerClassyLenses ''BalancingOpts -- tests tests_Balancing :: TestTree tests_Balancing = testGroup "Balancing" [ testCase "transactionInferBalancingAmount" $ do (fst <$> transactionInferBalancingAmount M.empty nulltransaction) @?= Right nulltransaction (fst <$> transactionInferBalancingAmount M.empty nulltransaction{tpostings = ["a" `post` usd (-5), "b" `post` missingamt]}) @?= Right nulltransaction{tpostings = ["a" `post` usd (-5), "b" `post` usd 5]} (fst <$> transactionInferBalancingAmount M.empty nulltransaction{tpostings = ["a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` missingamt]}) @?= Right nulltransaction{tpostings = ["a" `post` usd (-5), "b" `post` (eur 3 @@ usd 4), "c" `post` usd 1]} , testGroup "balanceTransaction" [ testCase "detect unbalanced entry, sign error" $ assertLeft (balanceTransaction defbalancingopts (Transaction 0 "" nullsourcepos (fromGregorian 2007 01 28) Nothing Unmarked "" "test" "" [] [posting {paccount = "a", pamount = mixedAmount (usd 1)}, posting {paccount = "b", pamount = mixedAmount (usd 1)}])) ,testCase "detect unbalanced entry, multiple missing amounts" $ assertLeft $ balanceTransaction defbalancingopts (Transaction 0 "" nullsourcepos (fromGregorian 2007 01 28) Nothing Unmarked "" "test" "" [] [ posting {paccount = "a", pamount = missingmixedamt} , posting {paccount = "b", pamount = missingmixedamt} ]) ,testCase "one missing amount is inferred" $ (pamount . last . tpostings <$> balanceTransaction defbalancingopts (Transaction 0 "" nullsourcepos (fromGregorian 2007 01 28) Nothing Unmarked "" "" "" [] [posting {paccount = "a", pamount = mixedAmount (usd 1)}, posting {paccount = "b", pamount = missingmixedamt}])) @?= Right (mixedAmount $ usd (-1)) ,testCase "conversion price is inferred" $ (pamount . head . 
tpostings <$> balanceTransaction defbalancingopts (Transaction 0 "" nullsourcepos (fromGregorian 2007 01 28) Nothing Unmarked "" "" "" [] [ posting {paccount = "a", pamount = mixedAmount (usd 1.35)} , posting {paccount = "b", pamount = mixedAmount (eur (-1))} ])) @?= Right (mixedAmount $ usd 1.35 @@ eur 1) ,testCase "balanceTransaction balances based on cost if there are unit prices" $ assertRight $ balanceTransaction defbalancingopts (Transaction 0 "" nullsourcepos (fromGregorian 2011 01 01) Nothing Unmarked "" "" "" [] [ posting {paccount = "a", pamount = mixedAmount $ usd 1 `at` eur 2} , posting {paccount = "a", pamount = mixedAmount $ usd (-2) `at` eur 1} ]) ,testCase "balanceTransaction balances based on cost if there are total prices" $ assertRight $ balanceTransaction defbalancingopts (Transaction 0 "" nullsourcepos (fromGregorian 2011 01 01) Nothing Unmarked "" "" "" [] [ posting {paccount = "a", pamount = mixedAmount $ usd 1 @@ eur 1} , posting {paccount = "a", pamount = mixedAmount $ usd (-2) @@ eur (-1)} ]) ] , testGroup "isTransactionBalanced" [ testCase "detect balanced" $ assertBool "" $ isTransactionBalanced defbalancingopts $ Transaction 0 "" nullsourcepos (fromGregorian 2009 01 01) Nothing Unmarked "" "a" "" [] [ posting {paccount = "b", pamount = mixedAmount (usd 1.00)} , posting {paccount = "c", pamount = mixedAmount (usd (-1.00))} ] ,testCase "detect unbalanced" $ assertBool "" $ not $ isTransactionBalanced defbalancingopts $ Transaction 0 "" nullsourcepos (fromGregorian 2009 01 01) Nothing Unmarked "" "a" "" [] [ posting {paccount = "b", pamount = mixedAmount (usd 1.00)} , posting {paccount = "c", pamount = mixedAmount (usd (-1.01))} ] ,testCase "detect unbalanced, one posting" $ assertBool "" $ not $ isTransactionBalanced defbalancingopts $ Transaction 0 "" nullsourcepos (fromGregorian 2009 01 01) Nothing Unmarked "" "a" "" [] [posting {paccount = "b", pamount = mixedAmount (usd 1.00)}] ,testCase "one zero posting is considered balanced for now" $ assertBool "" $ isTransactionBalanced defbalancingopts $ Transaction 0 "" nullsourcepos (fromGregorian 2009 01 01) Nothing Unmarked "" "a" "" [] [posting {paccount = "b", pamount = mixedAmount (usd 0)}] ,testCase "virtual postings don't need to balance" $ assertBool "" $ isTransactionBalanced defbalancingopts $ Transaction 0 "" nullsourcepos (fromGregorian 2009 01 01) Nothing Unmarked "" "a" "" [] [ posting {paccount = "b", pamount = mixedAmount (usd 1.00)} , posting {paccount = "c", pamount = mixedAmount (usd (-1.00))} , posting {paccount = "d", pamount = mixedAmount (usd 100), ptype = VirtualPosting} ] ,testCase "balanced virtual postings need to balance among themselves" $ assertBool "" $ not $ isTransactionBalanced defbalancingopts $ Transaction 0 "" nullsourcepos (fromGregorian 2009 01 01) Nothing Unmarked "" "a" "" [] [ posting {paccount = "b", pamount = mixedAmount (usd 1.00)} , posting {paccount = "c", pamount = mixedAmount (usd (-1.00))} , posting {paccount = "d", pamount = mixedAmount (usd 100), ptype = BalancedVirtualPosting} ] ,testCase "balanced virtual postings need to balance among themselves (2)" $ assertBool "" $ isTransactionBalanced defbalancingopts $ Transaction 0 "" nullsourcepos (fromGregorian 2009 01 01) Nothing Unmarked "" "a" "" [] [ posting {paccount = "b", pamount = mixedAmount (usd 1.00)} , posting {paccount = "c", pamount = mixedAmount (usd (-1.00))} , posting {paccount = "d", pamount = mixedAmount (usd 100), ptype = BalancedVirtualPosting} , posting {paccount = "3", pamount = mixedAmount (usd 
(-100)), ptype = BalancedVirtualPosting} ] ] ,testGroup "journalBalanceTransactions" [ testCase "missing-amounts" $ do let ej = journalBalanceTransactions defbalancingopts $ samplejournalMaybeExplicit False assertRight ej journalPostings <$> ej @?= Right (journalPostings samplejournal) ,testCase "balance-assignment" $ do let ej = journalBalanceTransactions defbalancingopts $ --2019/01/01 -- (a) = 1 nulljournal{ jtxns = [ transaction (fromGregorian 2019 01 01) [ vpost' "a" missingamt (balassert (num 1)) ] ]} assertRight ej case ej of Right j -> (jtxns j & head & tpostings & head & pamount & amountsRaw) @?= [num 1] Left _ -> error' "balance-assignment test: shouldn't happen" ,testCase "same-day-1" $ do assertRight $ journalBalanceTransactions defbalancingopts $ --2019/01/01 -- (a) = 1 --2019/01/01 -- (a) 1 = 2 nulljournal{ jtxns = [ transaction (fromGregorian 2019 01 01) [ vpost' "a" missingamt (balassert (num 1)) ] ,transaction (fromGregorian 2019 01 01) [ vpost' "a" (num 1) (balassert (num 2)) ] ]} ,testCase "same-day-2" $ do assertRight $ journalBalanceTransactions defbalancingopts $ --2019/01/01 -- (a) 2 = 2 --2019/01/01 -- b 1 -- a --2019/01/01 -- a 0 = 1 nulljournal{ jtxns = [ transaction (fromGregorian 2019 01 01) [ vpost' "a" (num 2) (balassert (num 2)) ] ,transaction (fromGregorian 2019 01 01) [ post' "b" (num 1) Nothing ,post' "a" missingamt Nothing ] ,transaction (fromGregorian 2019 01 01) [ post' "a" (num 0) (balassert (num 1)) ] ]} ,testCase "out-of-order" $ do assertRight $ journalBalanceTransactions defbalancingopts $ --2019/1/2 -- (a) 1 = 2 --2019/1/1 -- (a) 1 = 1 nulljournal{ jtxns = [ transaction (fromGregorian 2019 01 02) [ vpost' "a" (num 1) (balassert (num 2)) ] ,transaction (fromGregorian 2019 01 01) [ vpost' "a" (num 1) (balassert (num 1)) ] ]} ] ,testGroup "commodityStylesFromAmounts" $ [ -- Journal similar to the one on #1091: -- 2019/09/24 -- (a) 1,000.00 -- -- 2019/09/26 -- (a) 1000,000 -- testCase "1091a" $ do commodityStylesFromAmounts [ nullamt{aquantity=1000, astyle=AmountStyle L False (Precision 3) (Just ',') Nothing} ,nullamt{aquantity=1000, astyle=AmountStyle L False (Precision 2) (Just '.') (Just (DigitGroups ',' [3]))} ] @?= -- The commodity style should have period as decimal mark -- and comma as digit group mark. Right (M.fromList [ ("", AmountStyle L False (Precision 3) (Just '.') (Just (DigitGroups ',' [3]))) ]) -- same journal, entries in reverse order ,testCase "1091b" $ do commodityStylesFromAmounts [ nullamt{aquantity=1000, astyle=AmountStyle L False (Precision 2) (Just '.') (Just (DigitGroups ',' [3]))} ,nullamt{aquantity=1000, astyle=AmountStyle L False (Precision 3) (Just ',') Nothing} ] @?= -- The commodity style should have period as decimal mark -- and comma as digit group mark. Right (M.fromList [ ("", AmountStyle L False (Precision 3) (Just '.') (Just (DigitGroups ',' [3]))) ]) ] ] hledger-lib-1.30/Hledger/Data/Dates.hs0000644000000000000000000013400114434445206015625 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE NoMonoLocalBinds #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-| Date parsing and utilities for hledger. For date and time values, we use the standard Day and UTCTime types. A 'SmartDate' is a date which may be partially-specified or relative. Eg 2008\/12\/31, but also 2008\/12, 12\/31, tomorrow, last week, next year, in 5 days, in -3 quarters. 
We represent these as a triple of strings like (\"2008\",\"12\",\"\"), (\"\",\"\",\"tomorrow\"), (\"\",\"last\",\"week\"). A 'DateSpan' is the span of time between two specific calendar dates, or an open-ended span where one or both dates are unspecified. (A date span with both ends unspecified matches all dates.) An 'Interval' is ledger's \"reporting interval\" - weekly, monthly, quarterly, etc. 'Period' will probably replace DateSpan in due course. -} -- XXX fromGregorian silently clips bad dates, use fromGregorianValid instead ? module Hledger.Data.Dates ( -- * Misc date handling utilities fromEFDay, modifyEFDay, getCurrentDay, getCurrentMonth, getCurrentYear, nulldate, spanContainsDate, periodContainsDate, parsedateM, showDate, showEFDate, showDateSpan, showDateSpanDebug, showDateSpanMonthAbbrev, elapsedSeconds, prevday, periodexprp, parsePeriodExpr, parsePeriodExpr', nulldatespan, emptydatespan, datesepchar, datesepchars, isDateSepChar, spanStart, spanEnd, spanStartYear, spanEndYear, spanYears, spansSpan, spanIntersect, spansIntersect, spanDefaultsFrom, spanUnion, spansUnion, daysSpan, latestSpanContaining, smartdate, splitSpan, spansFromBoundaries, groupByDateSpan, fixSmartDate, fixSmartDateStr, fixSmartDateStrEither, fixSmartDateStrEither', yearp, daysInSpan, tests_Dates , intervalBoundaryBefore) where import Prelude hiding (Applicative(..)) import Control.Applicative (Applicative(..)) import Control.Applicative.Permutations import Control.Monad (guard, unless) import qualified Control.Monad.Fail as Fail (MonadFail, fail) import Data.Char (digitToInt, isDigit, ord) import Data.Default (def) import Data.Foldable (asum) import Data.Function (on) import Data.Functor (($>)) import Data.List (elemIndex, group, sort, sortBy) import Data.Maybe (catMaybes, fromMaybe, isJust, mapMaybe) import Data.Ord (comparing) import qualified Data.Set as Set import Data.Text (Text) import qualified Data.Text as T import Data.Time.Format hiding (months) import Data.Time.Calendar (Day, addDays, addGregorianYearsClip, addGregorianMonthsClip, diffDays, fromGregorian, fromGregorianValid, toGregorian) import Data.Time.Calendar.OrdinalDate (fromMondayStartWeek, mondayStartWeek) import Data.Time.Clock (UTCTime, diffUTCTime) import Data.Time.LocalTime (getZonedTime, localDay, zonedTimeToLocalTime) import Safe (headMay, lastMay, maximumMay, minimumMay) import Text.Megaparsec import Text.Megaparsec.Char (char, char', digitChar, string, string') import Text.Megaparsec.Char.Lexer (decimal, signed) import Text.Printf (printf) import Hledger.Data.Types import Hledger.Data.Period import Hledger.Utils -- Help ppShow parse and line-wrap DateSpans better in debug output. instance Show DateSpan where show s = "DateSpan " ++ T.unpack (showDateSpan s) showDate :: Day -> Text showDate = T.pack . show showEFDate :: EFDay -> Text showEFDate = showDate . fromEFDay -- | Render a datespan as a display string, abbreviating into a -- compact form if possible. -- Warning, hides whether dates are Exact or Flex. showDateSpan :: DateSpan -> Text showDateSpan = showPeriod . dateSpanAsPeriod -- | Show a DateSpan with its begin/end dates, exact or flex. showDateSpanDebug :: DateSpan -> String showDateSpanDebug (DateSpan b e)= "DateSpan (" <> show b <> ") (" <> show e <> ")" -- | Like showDateSpan, but show month spans as just the abbreviated month name -- in the current locale. showDateSpanMonthAbbrev :: DateSpan -> Text showDateSpanMonthAbbrev = showPeriodMonthAbbrev . dateSpanAsPeriod -- | Get the current local date. 
getCurrentDay :: IO Day getCurrentDay = localDay . zonedTimeToLocalTime <$> getZonedTime -- | Get the current local month number. getCurrentMonth :: IO Int getCurrentMonth = second3 . toGregorian <$> getCurrentDay -- | Get the current local year. getCurrentYear :: IO Integer getCurrentYear = first3 . toGregorian <$> getCurrentDay elapsedSeconds :: Fractional a => UTCTime -> UTCTime -> a elapsedSeconds t1 = realToFrac . diffUTCTime t1 spanStart :: DateSpan -> Maybe Day spanStart (DateSpan d _) = fromEFDay <$> d spanEnd :: DateSpan -> Maybe Day spanEnd (DateSpan _ d) = fromEFDay <$> d spanStartDate :: DateSpan -> Maybe EFDay spanStartDate (DateSpan d _) = d spanEndDate :: DateSpan -> Maybe EFDay spanEndDate (DateSpan _ d) = d spanStartYear :: DateSpan -> Maybe Year spanStartYear (DateSpan d _) = fmap (first3 . toGregorian . fromEFDay) d spanEndYear :: DateSpan -> Maybe Year spanEndYear (DateSpan d _) = fmap (first3 . toGregorian. fromEFDay) d -- | Get the 0-2 years mentioned explicitly in a DateSpan. spanYears :: DateSpan -> [Year] spanYears (DateSpan ma mb) = mapMaybe (fmap (first3 . toGregorian. fromEFDay)) [ma,mb] -- might be useful later: http://en.wikipedia.org/wiki/Allen%27s_interval_algebra -- | Get overall span enclosing multiple sequentially ordered spans. -- The start and end date will be exact or flexible depending on -- the first span's start date and last span's end date. spansSpan :: [DateSpan] -> DateSpan spansSpan spans = DateSpan (spanStartDate =<< headMay spans) (spanEndDate =<< lastMay spans) -- | Split a DateSpan into consecutive exact spans of the specified Interval. -- If the first argument is true and the interval is Weeks, Months, Quarters or Years, -- the start date will be adjusted backward if needed to nearest natural interval boundary -- (a monday, first of month, first of quarter or first of year). -- If no interval is specified, the original span is returned. -- If the original span is the null date span, ie unbounded, the null date span is returned. -- If the original span is empty, eg if the end date is <= the start date, no spans are returned. -- -- ==== Examples: -- >>> let t i y1 m1 d1 y2 m2 d2 = splitSpan True i $ DateSpan (Just $ Flex $ fromGregorian y1 m1 d1) (Just $ Flex $ fromGregorian y2 m2 d2) -- >>> t NoInterval 2008 01 01 2009 01 01 -- [DateSpan 2008] -- >>> t (Quarters 1) 2008 01 01 2009 01 01 -- [DateSpan 2008Q1,DateSpan 2008Q2,DateSpan 2008Q3,DateSpan 2008Q4] -- >>> splitSpan True (Quarters 1) nulldatespan -- [DateSpan ..] 
-- >>> t (Days 1) 2008 01 01 2008 01 01 -- an empty datespan -- [] -- >>> t (Quarters 1) 2008 01 01 2008 01 01 -- [] -- >>> t (Months 1) 2008 01 01 2008 04 01 -- [DateSpan 2008-01,DateSpan 2008-02,DateSpan 2008-03] -- >>> t (Months 2) 2008 01 01 2008 04 01 -- [DateSpan 2008-01-01..2008-02-29,DateSpan 2008-03-01..2008-04-30] -- >>> t (Weeks 1) 2008 01 01 2008 01 15 -- [DateSpan 2007-12-31W01,DateSpan 2008-01-07W02,DateSpan 2008-01-14W03] -- >>> t (Weeks 2) 2008 01 01 2008 01 15 -- [DateSpan 2007-12-31..2008-01-13,DateSpan 2008-01-14..2008-01-27] -- >>> t (DayOfMonth 2) 2008 01 01 2008 04 01 -- [DateSpan 2007-12-02..2008-01-01,DateSpan 2008-01-02..2008-02-01,DateSpan 2008-02-02..2008-03-01,DateSpan 2008-03-02..2008-04-01] -- >>> t (WeekdayOfMonth 2 4) 2011 01 01 2011 02 15 -- [DateSpan 2010-12-09..2011-01-12,DateSpan 2011-01-13..2011-02-09,DateSpan 2011-02-10..2011-03-09] -- >>> t (DaysOfWeek [2]) 2011 01 01 2011 01 15 -- [DateSpan 2010-12-28..2011-01-03,DateSpan 2011-01-04..2011-01-10,DateSpan 2011-01-11..2011-01-17] -- >>> t (DayOfYear 11 29) 2011 10 01 2011 10 15 -- [DateSpan 2010-11-29..2011-11-28] -- >>> t (DayOfYear 11 29) 2011 12 01 2012 12 15 -- [DateSpan 2011-11-29..2012-11-28,DateSpan 2012-11-29..2013-11-28] -- splitSpan :: Bool -> Interval -> DateSpan -> [DateSpan] splitSpan _ _ (DateSpan Nothing Nothing) = [DateSpan Nothing Nothing] splitSpan _ _ ds | isEmptySpan ds = [] splitSpan _ _ ds@(DateSpan (Just s) (Just e)) | s == e = [ds] splitSpan _ NoInterval ds = [ds] splitSpan _ (Days n) ds = splitspan id addDays n ds splitSpan adjust (Weeks n) ds = splitspan (if adjust then startofweek else id) addDays (7*n) ds splitSpan adjust (Months n) ds = splitspan (if adjust then startofmonth else id) addGregorianMonthsClip n ds splitSpan adjust (Quarters n) ds = splitspan (if adjust then startofquarter else id) addGregorianMonthsClip (3*n) ds splitSpan adjust (Years n) ds = splitspan (if adjust then startofyear else id) addGregorianYearsClip n ds splitSpan _ (DayOfMonth dom) ds = splitspan (nthdayofmonthcontaining dom) (addGregorianMonthsToMonthday dom) 1 ds splitSpan _ (DayOfYear m n) ds = splitspan (nthdayofyearcontaining m n) (addGregorianYearsClip) 1 ds splitSpan _ (WeekdayOfMonth n wd) ds = splitspan (nthweekdayofmonthcontaining n wd) advancemonths 1 ds where advancemonths 0 = id advancemonths w = advancetonthweekday n wd . startofmonth . addGregorianMonthsClip w splitSpan _ (DaysOfWeek []) ds = [ds] splitSpan _ (DaysOfWeek days@(n:_)) ds = spansFromBoundaries e bdrys where (s, e) = dateSpanSplitLimits (nthdayofweekcontaining n) nextday ds bdrys = concatMap (flip map starts . addDays) [0,7..] -- The first representative of each weekday starts = map (\d -> addDays (toInteger $ d - n) $ nthdayofweekcontaining n s) days -- Like addGregorianMonthsClip, add one month to the given date, clipping when needed -- to fit it within the next month's length. But also, keep a target day of month in mind, -- and revert to that or as close to it as possible in subsequent longer months. -- Eg, using it to step through 31sts gives 1/31, 2/28, 3/31, 4/30, 5/31.. addGregorianMonthsToMonthday :: MonthDay -> Integer -> Day -> Day addGregorianMonthsToMonthday dom n d = let (y,m,_) = toGregorian $ addGregorianMonthsClip n d in fromGregorian y m dom -- Split the given span into exact spans using the provided helper functions: -- 1. The start function is applied to the span's start date to get the first sub-span's start date. -- 2. 
The addInterval function is used to calculate the subsequent spans' start dates, -- possibly with stride increased by the mult multiplier. -- It should adapt to spans of varying length, eg if splitting on "every 31st of month" -- addInterval should adjust to 28/29/30 in short months but return to 31 in the long months. splitspan :: (Day -> Day) -> (Integer -> Day -> Day) -> Int -> DateSpan -> [DateSpan] splitspan start addInterval mult ds = spansFromBoundaries e bdrys where (s, e) = dateSpanSplitLimits start (addInterval (toInteger mult)) ds bdrys = mapM (addInterval . toInteger) [0,mult..] $ start s -- | Fill in missing start/end dates for calculating 'splitSpan'. dateSpanSplitLimits :: (Day -> Day) -> (Day -> Day) -> DateSpan -> (Day, Day) dateSpanSplitLimits start _ (DateSpan (Just s) (Just e)) = (start $ fromEFDay s, fromEFDay e) dateSpanSplitLimits start next (DateSpan (Just s) Nothing) = (start $ fromEFDay s, next $ start $ fromEFDay s) dateSpanSplitLimits start next (DateSpan Nothing (Just e)) = (start $ fromEFDay e, next $ start $ fromEFDay e) dateSpanSplitLimits _ _ (DateSpan Nothing Nothing) = error "dateSpanSplitLimits: should not be nulldatespan" -- PARTIAL: This case should have been handled in splitSpan -- | Construct a list of exact 'DateSpan's from a list of boundaries, which fit within a given range. spansFromBoundaries :: Day -> [Day] -> [DateSpan] spansFromBoundaries e bdrys = zipWith (DateSpan `on` (Just . Exact)) (takeWhile (< e) bdrys) $ drop 1 bdrys -- | Count the days in a DateSpan, or if it is open-ended return Nothing. daysInSpan :: DateSpan -> Maybe Integer daysInSpan (DateSpan (Just d1) (Just d2)) = Just $ diffDays (fromEFDay d2) (fromEFDay d1) daysInSpan _ = Nothing -- | Is this an empty span, ie closed with the end date on or before the start date ? isEmptySpan :: DateSpan -> Bool isEmptySpan (DateSpan (Just s) (Just e)) = e <= s isEmptySpan _ = False -- | Does the span include the given date ? spanContainsDate :: DateSpan -> Day -> Bool spanContainsDate (DateSpan Nothing Nothing) _ = True spanContainsDate (DateSpan Nothing (Just e)) d = d < fromEFDay e spanContainsDate (DateSpan (Just b) Nothing) d = d >= fromEFDay b spanContainsDate (DateSpan (Just b) (Just e)) d = d >= fromEFDay b && d < fromEFDay e -- | Does the period include the given date ? -- (Here to avoid import cycle). periodContainsDate :: Period -> Day -> Bool periodContainsDate p = spanContainsDate (periodAsDateSpan p) -- | Group elements based on where they fall in a list of 'DateSpan's without -- gaps. The precondition is not checked. groupByDateSpan :: Bool -> (a -> Day) -> [DateSpan] -> [a] -> [(DateSpan, [a])] groupByDateSpan showempty date colspans = groupByCols colspans . dropWhile (beforeStart . fst) . sortBy (comparing fst) . map (\x -> (date x, x)) where groupByCols [] _ = [] groupByCols (c:cs) [] = if showempty then (c, []) : groupByCols cs [] else [] groupByCols (c:cs) ps = (c, map snd matches) : groupByCols cs later where (matches, later) = span ((spanEnd c >) . Just . fst) ps beforeStart = maybe (const True) (>) $ spanStart =<< headMay colspans -- | Calculate the intersection of a number of datespans. spansIntersect [] = nulldatespan spansIntersect [d] = d spansIntersect (d:ds) = d `spanIntersect` (spansIntersect ds) -- | Calculate the intersection of two datespans. 
-- -- For non-intersecting spans, gives an empty span beginning on the second's start date: -- >>> DateSpan (Just $ Flex $ fromGregorian 2018 01 01) (Just $ Flex $ fromGregorian 2018 01 03) `spanIntersect` DateSpan (Just $ Flex $ fromGregorian 2018 01 03) (Just $ Flex $ fromGregorian 2018 01 05) -- DateSpan 2018-01-03..2018-01-02 spanIntersect (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = latest b1 b2 e = earliest e1 e2 -- | Fill any unspecified dates in the first span with the dates from -- the second one. Sort of a one-way spanIntersect. spanDefaultsFrom (DateSpan a1 b1) (DateSpan a2 b2) = DateSpan a b where a = if isJust a1 then a1 else a2 b = if isJust b1 then b1 else b2 -- | Calculate the union of a number of datespans. spansUnion [] = nulldatespan spansUnion [d] = d spansUnion (d:ds) = d `spanUnion` (spansUnion ds) -- | Calculate the union of two datespans. spanUnion (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e where b = earliest b1 b2 e = latest e1 e2 latest d Nothing = d latest Nothing d = d latest (Just d1) (Just d2) = Just $ max d1 d2 earliest d Nothing = d earliest Nothing d = d earliest (Just d1) (Just d2) = Just $ min d1 d2 -- | Calculate the minimal DateSpan containing all of the given Days (in the -- usual exclusive-end-date sense: beginning on the earliest, and ending on -- the day after the latest). daysSpan :: [Day] -> DateSpan daysSpan ds = DateSpan (Exact <$> minimumMay ds) (Exact . addDays 1 <$> maximumMay ds) -- | Select the DateSpan containing a given Day, if any, from a given list of -- DateSpans. -- -- If the DateSpans are non-overlapping, this returns the unique containing -- DateSpan, if it exists. If the DateSpans are overlapping, it will return the -- containing DateSpan with the latest start date, and then latest end date. -- Note: This will currently return `DateSpan (Just s) (Just e)` before it will -- return `DateSpan (Just s) Nothing`. It's unclear which behaviour is desired. -- This is irrelevant at the moment as it's never applied to any list with -- overlapping DateSpans. latestSpanContaining :: [DateSpan] -> Day -> Maybe DateSpan latestSpanContaining datespans = go where go day = do spn <- Set.lookupLT supSpan spanSet guard $ spanContainsDate spn day return spn where -- The smallest DateSpan larger than any DateSpan containing day. supSpan = DateSpan (Just $ Exact $ addDays 1 day) Nothing spanSet = Set.fromList $ filter (not . isEmptySpan) datespans -- | Parse a period expression to an Interval and overall DateSpan using -- the provided reference date, or return a parse error. parsePeriodExpr :: Day -> Text -> Either HledgerParseErrors (Interval, DateSpan) parsePeriodExpr refdate s = parsewith (periodexprp refdate <* eof) (T.toLower s) -- | Like parsePeriodExpr, but call error' on failure. parsePeriodExpr' :: Day -> Text -> (Interval, DateSpan) parsePeriodExpr' refdate s = either (error' . ("failed to parse:" ++) . customErrorBundlePretty) id $ -- PARTIAL: parsePeriodExpr refdate s -- | Show a DateSpan as a human-readable pseudo-period-expression string. -- dateSpanAsText :: DateSpan -> String -- dateSpanAsText (DateSpan Nothing Nothing) = "all" -- dateSpanAsText (DateSpan Nothing (Just e)) = printf "to %s" (show e) -- dateSpanAsText (DateSpan (Just b) Nothing) = printf "from %s" (show b) -- dateSpanAsText (DateSpan (Just b) (Just e)) = printf "%s to %s" (show b) (show e) -- | Convert a single smart date string to a date span using the provided -- reference date, or raise an error. 
-- spanFromSmartDateString :: Day -> String -> DateSpan -- spanFromSmartDateString refdate s = spanFromSmartDate refdate sdate -- where -- sdate = fromparse $ parsewith smartdateonly s spanFromSmartDate :: Day -> SmartDate -> DateSpan spanFromSmartDate refdate sdate = DateSpan (Just b) (Just e) where (ry,rm,_) = toGregorian refdate (b,e) = span' sdate where span' :: SmartDate -> (EFDay, EFDay) span' (SmartCompleteDate day) = (Exact day, Exact $ nextday day) span' (SmartAssumeStart y Nothing) = (Flex $ startofyear day, Flex $ nextyear day) where day = fromGregorian y 1 1 span' (SmartAssumeStart y (Just m)) = (Flex $ startofmonth day, Flex $ nextmonth day) where day = fromGregorian y m 1 span' (SmartFromReference m d) = (Exact day, Exact $ nextday day) where day = fromGregorian ry (fromMaybe rm m) d span' (SmartMonth m) = (Flex $ startofmonth day, Flex $ nextmonth day) where day = fromGregorian ry m 1 span' (SmartRelative n Day) = (Exact $ addDays n refdate, Exact $ addDays (n+1) refdate) span' (SmartRelative n Week) = (Flex $ addDays (7*n) d, Flex $ addDays (7*n+7) d) where d = thisweek refdate span' (SmartRelative n Month) = (Flex $ addGregorianMonthsClip n d, Flex $ addGregorianMonthsClip (n+1) d) where d = thismonth refdate span' (SmartRelative n Quarter) = (Flex $ addGregorianMonthsClip (3*n) d, Flex $ addGregorianMonthsClip (3*n+3) d) where d = thisquarter refdate span' (SmartRelative n Year) = (Flex $ addGregorianYearsClip n d, Flex $ addGregorianYearsClip (n+1) d) where d = thisyear refdate -- showDay :: Day -> String -- showDay day = printf "%04d/%02d/%02d" y m d where (y,m,d) = toGregorian day -- | Convert a smart date string to an explicit yyyy\/mm\/dd string using -- the provided reference date, or raise an error. fixSmartDateStr :: Day -> Text -> Text fixSmartDateStr d s = either (error' . printf "could not parse date %s %s" (show s) . show) id $ -- PARTIAL: (fixSmartDateStrEither d s :: Either HledgerParseErrors Text) -- | A safe version of fixSmartDateStr. fixSmartDateStrEither :: Day -> Text -> Either HledgerParseErrors Text fixSmartDateStrEither d = fmap showEFDate . fixSmartDateStrEither' d fixSmartDateStrEither' :: Day -> Text -> Either HledgerParseErrors EFDay fixSmartDateStrEither' d s = case parsewith smartdateonly (T.toLower s) of Right sd -> Right $ fixSmartDate d sd Left e -> Left e -- | Convert a SmartDate to a specific date using the provided reference date. -- This date will be exact or flexible depending on whether the day was -- specified exactly. (Missing least-significant parts produces a flex date.) 
-- -- ==== Examples: -- >>> :set -XOverloadedStrings -- >>> let t = fixSmartDateStr (fromGregorian 2008 11 26) -- >>> t "0000-01-01" -- "0000-01-01" -- >>> t "1999-12-02" -- "1999-12-02" -- >>> t "1999.12.02" -- "1999-12-02" -- >>> t "1999/3/2" -- "1999-03-02" -- >>> t "19990302" -- "1999-03-02" -- >>> t "2008/2" -- "2008-02-01" -- >>> t "0020/2" -- "0020-02-01" -- >>> t "1000" -- "1000-01-01" -- >>> t "4/2" -- "2008-04-02" -- >>> t "2" -- "2008-11-02" -- >>> t "January" -- "2008-01-01" -- >>> t "feb" -- "2008-02-01" -- >>> t "today" -- "2008-11-26" -- >>> t "yesterday" -- "2008-11-25" -- >>> t "tomorrow" -- "2008-11-27" -- >>> t "this day" -- "2008-11-26" -- >>> t "last day" -- "2008-11-25" -- >>> t "next day" -- "2008-11-27" -- >>> t "this week" -- last monday -- "2008-11-24" -- >>> t "last week" -- previous monday -- "2008-11-17" -- >>> t "next week" -- next monday -- "2008-12-01" -- >>> t "this month" -- "2008-11-01" -- >>> t "last month" -- "2008-10-01" -- >>> t "next month" -- "2008-12-01" -- >>> t "this quarter" -- "2008-10-01" -- >>> t "last quarter" -- "2008-07-01" -- >>> t "next quarter" -- "2009-01-01" -- >>> t "this year" -- "2008-01-01" -- >>> t "last year" -- "2007-01-01" -- >>> t "next year" -- "2009-01-01" -- -- t "last wed" -- "2008-11-19" -- t "next friday" -- "2008-11-28" -- t "next january" -- "2009-01-01" -- -- >>> t "in 5 days" -- "2008-12-01" -- >>> t "in 7 months" -- "2009-06-01" -- >>> t "in -2 weeks" -- "2008-11-10" -- >>> t "1 quarter ago" -- "2008-07-01" -- >>> t "1 week ahead" -- "2008-12-01" fixSmartDate :: Day -> SmartDate -> EFDay fixSmartDate refdate = fix where fix :: SmartDate -> EFDay fix (SmartCompleteDate d) = Exact d fix (SmartAssumeStart y m) = Flex $ fromGregorian y (fromMaybe 1 m) 1 fix (SmartFromReference m d) = Exact $ fromGregorian ry (fromMaybe rm m) d fix (SmartMonth m) = Flex $ fromGregorian ry m 1 fix (SmartRelative n Day) = Exact $ addDays n refdate fix (SmartRelative n Week) = Flex $ addDays (7*n) $ thisweek refdate fix (SmartRelative n Month) = Flex $ addGregorianMonthsClip n $ thismonth refdate fix (SmartRelative n Quarter) = Flex $ addGregorianMonthsClip (3*n) $ thisquarter refdate fix (SmartRelative n Year) = Flex $ addGregorianYearsClip n $ thisyear refdate (ry, rm, _) = toGregorian refdate prevday :: Day -> Day prevday = addDays (-1) nextday = addDays 1 thisweek = startofweek prevweek = startofweek . addDays (-7) nextweek = startofweek . addDays 7 startofweek day = fromMondayStartWeek y w 1 where (y,_,_) = toGregorian day (w,_) = mondayStartWeek day thismonth = startofmonth prevmonth = startofmonth . addGregorianMonthsClip (-1) nextmonth = startofmonth . addGregorianMonthsClip 1 startofmonth day = fromGregorian y m 1 where (y,m,_) = toGregorian day nthdayofmonth d day = fromGregorian y m d where (y,m,_) = toGregorian day thisquarter = startofquarter startofquarter day = fromGregorian y (firstmonthofquarter m) 1 where (y,m,_) = toGregorian day firstmonthofquarter m2 = ((m2-1) `div` 3) * 3 + 1 thisyear = startofyear prevyear = startofyear . addGregorianYearsClip (-1) nextyear = startofyear . addGregorianYearsClip 1 startofyear day = fromGregorian y 1 1 where (y,_,_) = toGregorian day -- Get the natural start for the given interval that falls on or before the given day, -- when applicable. Works for Weeks, Months, Quarters, Years, eg. 
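-- (a hedged sketch of expected results, not doctests; assumes Monday-start
-- weeks as used elsewhere in this module):
--   intervalBoundaryBefore (Weeks 1)  (fromGregorian 2023 6 15)  -- a Thursday
--     -> 2023-06-12                                              -- the preceding Monday
--   intervalBoundaryBefore (Months 1) (fromGregorian 2023 6 15)
--     -> 2023-06-01
--   intervalBoundaryBefore (Days 1)   (fromGregorian 2023 6 15)
--     -> 2023-06-15                                              -- already a boundary, unchanged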
intervalBoundaryBefore :: Interval -> Day -> Day intervalBoundaryBefore i d = case splitSpan True i (DateSpan (Just $ Exact d) (Just $ Exact $ addDays 1 d)) of (DateSpan (Just start) _:_) -> fromEFDay start _ -> d -- | For given date d find year-long interval that starts on given -- MM/DD of year and covers it. -- The given MM and DD should be basically valid (1-12 & 1-31), -- or an error is raised. -- -- Examples: lets take 2017-11-22. Year-long intervals covering it that -- starts before Nov 22 will start in 2017. However -- intervals that start after Nov 23rd should start in 2016: -- >>> let wed22nd = fromGregorian 2017 11 22 -- >>> nthdayofyearcontaining 11 21 wed22nd -- 2017-11-21 -- >>> nthdayofyearcontaining 11 22 wed22nd -- 2017-11-22 -- >>> nthdayofyearcontaining 11 23 wed22nd -- 2016-11-23 -- >>> nthdayofyearcontaining 12 02 wed22nd -- 2016-12-02 -- >>> nthdayofyearcontaining 12 31 wed22nd -- 2016-12-31 -- >>> nthdayofyearcontaining 1 1 wed22nd -- 2017-01-01 nthdayofyearcontaining :: Month -> MonthDay -> Day -> Day nthdayofyearcontaining m mdy date -- PARTIAL: | not (validMonth m) = error' $ "nthdayofyearcontaining: invalid month "++show m | not (validDay mdy) = error' $ "nthdayofyearcontaining: invalid day " ++show mdy | mmddOfSameYear <= date = mmddOfSameYear | otherwise = mmddOfPrevYear where mmddOfSameYear = addDays (toInteger mdy-1) $ applyN (m-1) nextmonth s mmddOfPrevYear = addDays (toInteger mdy-1) $ applyN (m-1) nextmonth $ prevyear s s = startofyear date -- | For a given date d find the month-long period that starts on day n of a month -- that includes d. (It will begin on day n or either d's month or the previous month.) -- The given day of month should be in the range 1-31, or an error will be raised. -- -- Examples: lets take 2017-11-22. Month-long intervals covering it that -- start on 1st-22nd of month will start in Nov. However -- intervals that start on 23rd-30th of month should start in Oct: -- >>> let wed22nd = fromGregorian 2017 11 22 -- >>> nthdayofmonthcontaining 1 wed22nd -- 2017-11-01 -- >>> nthdayofmonthcontaining 12 wed22nd -- 2017-11-12 -- >>> nthdayofmonthcontaining 22 wed22nd -- 2017-11-22 -- >>> nthdayofmonthcontaining 23 wed22nd -- 2017-10-23 -- >>> nthdayofmonthcontaining 30 wed22nd -- 2017-10-30 nthdayofmonthcontaining :: MonthDay -> Day -> Day nthdayofmonthcontaining mdy date -- PARTIAL: | not (validDay mdy) = error' $ "nthdayofmonthcontaining: invalid day " ++show mdy | nthOfSameMonth <= date = nthOfSameMonth | otherwise = nthOfPrevMonth where nthOfSameMonth = nthdayofmonth mdy s nthOfPrevMonth = nthdayofmonth mdy $ prevmonth s s = startofmonth date -- | For given date d find week-long interval that starts on nth day of week -- and covers it. -- -- Examples: 2017-11-22 is Wed. Week-long intervals that cover it and -- start on Mon, Tue or Wed will start in the same week. 
However -- intervals that start on Thu or Fri should start in prev week: -- >>> let wed22nd = fromGregorian 2017 11 22 -- >>> nthdayofweekcontaining 1 wed22nd -- 2017-11-20 -- >>> nthdayofweekcontaining 2 wed22nd -- 2017-11-21 -- >>> nthdayofweekcontaining 3 wed22nd -- 2017-11-22 -- >>> nthdayofweekcontaining 4 wed22nd -- 2017-11-16 -- >>> nthdayofweekcontaining 5 wed22nd -- 2017-11-17 nthdayofweekcontaining :: WeekDay -> Day -> Day nthdayofweekcontaining n d | nthOfSameWeek <= d = nthOfSameWeek | otherwise = nthOfPrevWeek where nthOfSameWeek = addDays (toInteger n-1) s nthOfPrevWeek = addDays (toInteger n-1) $ prevweek s s = startofweek d -- | For given date d find month-long interval that starts on nth weekday of month -- and covers it. -- -- Examples: 2017-11-22 is 3rd Wed of Nov. Month-long intervals that cover it and -- start on 1st-4th Wed will start in Nov. However -- intervals that start on 4th Thu or Fri or later should start in Oct: -- >>> let wed22nd = fromGregorian 2017 11 22 -- >>> nthweekdayofmonthcontaining 1 3 wed22nd -- 2017-11-01 -- >>> nthweekdayofmonthcontaining 3 2 wed22nd -- 2017-11-21 -- >>> nthweekdayofmonthcontaining 4 3 wed22nd -- 2017-11-22 -- >>> nthweekdayofmonthcontaining 4 4 wed22nd -- 2017-10-26 -- >>> nthweekdayofmonthcontaining 4 5 wed22nd -- 2017-10-27 nthweekdayofmonthcontaining :: Int -> WeekDay -> Day -> Day nthweekdayofmonthcontaining n wd d | nthWeekdaySameMonth <= d = nthWeekdaySameMonth | otherwise = nthWeekdayPrevMonth where nthWeekdaySameMonth = advancetonthweekday n wd $ startofmonth d nthWeekdayPrevMonth = advancetonthweekday n wd $ prevmonth d -- | Advance to nth weekday wd after given start day s -- Can call error. advancetonthweekday :: Int -> WeekDay -> Day -> Day advancetonthweekday n wd s = -- PARTIAL: maybe err (addWeeks (n-1)) $ firstMatch (>=s) $ iterate (addWeeks 1) $ firstweekday s where err = error' "advancetonthweekday: should not happen" addWeeks k = addDays (7 * toInteger k) firstMatch p = headMay . dropWhile (not . p) firstweekday = addDays (toInteger wd-1) . startofweek ---------------------------------------------------------------------- -- parsing -- -- | Parse a couple of date-time string formats to a time type. -- parsedatetimeM :: String -> Maybe LocalTime -- parsedatetimeM s = asum [ -- parseTimeM TruedefaultTimeLocale "%Y/%m/%d %H:%M:%S" s, -- parseTimeM TruedefaultTimeLocale "%Y-%m-%d %H:%M:%S" s -- ] -- | Try to parse a couple of date string formats: -- `YYYY-MM-DD`, `YYYY/MM/DD` or `YYYY.MM.DD`, with leading zeros required. -- For internal use, not quite the same as the journal's "simple dates". -- >>> parsedateM "2008/02/03" -- Just 2008-02-03 -- >>> parsedateM "2008/02/03/" -- Nothing -- >>> parsedateM "2008/02/30" -- Nothing parsedateM :: String -> Maybe Day parsedateM s = asum [ parseTimeM True defaultTimeLocale "%Y-%m-%d" s, parseTimeM True defaultTimeLocale "%Y/%m/%d" s, parseTimeM True defaultTimeLocale "%Y.%m.%d" s ] {-| Parse a date in any of the formats allowed in Ledger's period expressions, and some others. Assumes any text in the parse stream has been lowercased. Returns a SmartDate, to be converted to a full date later (see fixSmartDate). 
Examples: > 2004 (start of year, which must have 4+ digits) > 2004/10 (start of month, which must be 1-12) > 2004/10/1 (exact date, day must be 1-31) > 10/1 (month and day in current year) > 21 (day in current month) > october, oct (start of month in current year) > yesterday, today, tomorrow (-1, 0, 1 days from today) > last/this/next day/week/month/quarter/year (-1, 0, 1 periods from the current period) > in n days/weeks/months/quarters/years (n periods from the current period) > n days/weeks/months/quarters/years ago (-n periods from the current period) > 20181201 (8 digit YYYYMMDD with valid year month and day) > 201812 (6 digit YYYYMM with valid year and month) Note malformed digit sequences might give surprising results: > 201813 (6 digits with an invalid month is parsed as start of 6-digit year) > 20181301 (8 digits with an invalid month is parsed as start of 8-digit year) > 20181232 (8 digits with an invalid day gives an error) > 201801012 (9+ digits beginning with a valid YYYYMMDD gives an error) Eg: YYYYMMDD is parsed as year-month-date if those parts are valid (>=4 digits, 1-12, and 1-31 respectively): >>> parsewith (smartdate <* eof) "20181201" Right (SmartCompleteDate 2018-12-01) YYYYMM is parsed as year-month-01 if year and month are valid: >>> parsewith (smartdate <* eof) "201804" Right (SmartAssumeStart 2018 (Just 4)) With an invalid month, it's parsed as a year: >>> parsewith (smartdate <* eof) "201813" Right (SmartAssumeStart 201813 Nothing) A 9+ digit number beginning with valid YYYYMMDD gives an error: >>> parsewith (smartdate <* eof) "201801012" Left (...) Big numbers not beginning with a valid YYYYMMDD are parsed as a year: >>> parsewith (smartdate <* eof) "201813012" Right (SmartAssumeStart 201813012 Nothing) -} smartdate :: TextParser m SmartDate smartdate = choice' -- XXX maybe obscures date errors ? see ledgerdate [ relativeP , yyyymmdd, ymd , (\(m,d) -> SmartFromReference (Just m) d) <$> md , failIfInvalidDate . SmartFromReference Nothing =<< decimal , SmartMonth <$> (month <|> mon) , SmartRelative 0 Day <$ string' "today" , SmartRelative (-1) Day <$ string' "yesterday" , SmartRelative 1 Day <$ string' "tomorrow" ] where relativeP = do optional $ string' "in" <* skipNonNewlineSpaces num <- seqP <* skipNonNewlineSpaces interval <- intervalP <* skipNonNewlineSpaces sign <- choice [negate <$ string' "ago", id <$ string' "ahead", pure id] return $ SmartRelative (sign num) interval seqP = choice [ 0 <$ string' "this", -1 <$ string' "last", 1 <$ string' "next", signed skipNonNewlineSpaces decimal ] intervalP = choice [ Day <$ string' "day", Week <$ string' "week", Month <$ string' "month" , Quarter <$ string' "quarter", Year <$ string' "year" ] <* optional (char' 's') -- | Like smartdate, but there must be nothing other than whitespace after the date. smartdateonly :: TextParser m SmartDate smartdateonly = smartdate <* skipNonNewlineSpaces <* eof datesepchars :: String datesepchars = "/-." datesepchar :: TextParser m Char datesepchar = satisfy isDateSepChar isDateSepChar :: Char -> Bool isDateSepChar c = c == '-' || c == '/' || c == '.' 
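-- A hedged usage sketch (illustrative, not part of this module's API):
-- resolving a user-supplied smart date string against a reference day,
-- essentially what fixSmartDateStrEither' below does, but returning a plain Day.
--
-- resolveSmartDate :: Day -> Text -> Either HledgerParseErrors Day
-- resolveSmartDate refday =
--   fmap (fromEFDay . fixSmartDate refday) . parsewith smartdateonly . T.toLower
--
-- eg resolveSmartDate (fromGregorian 2008 11 26) "last month"
-- would be expected to give Right 2008-10-01.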
validMonth, validDay :: Int -> Bool validMonth n = n >= 1 && n <= 12 validDay n = n >= 1 && n <= 31 failIfInvalidDate :: Fail.MonadFail m => SmartDate -> m SmartDate failIfInvalidDate s = unless isValid (Fail.fail $ "bad smart date: " ++ show s) $> s where isValid = case s of SmartAssumeStart _ (Just m) -> validMonth m SmartFromReference mm d -> isJust $ fromGregorianValid 2004 (fromMaybe 1 mm) d SmartMonth m -> validMonth m _ -> True showBadDate :: Integer -> Int -> Int -> String showBadDate y m d = "bad smart date: " ++ show y ++ "-" ++ show m ++ "-" ++ show d yyyymmdd :: TextParser m SmartDate yyyymmdd = do y <- read <$> count 4 digitChar m <- read <$> count 2 digitChar mdy <- optional $ read <$> count 2 digitChar case mdy of Nothing -> failIfInvalidDate $ SmartAssumeStart y (Just m) Just d -> maybe (Fail.fail $ showBadDate y m d) (return . SmartCompleteDate) $ fromGregorianValid y m d ymd :: TextParser m SmartDate ymd = do y <- yearp emd <- optional . try $ do sep <- datesepchar m <- decimal unless (validMonth m) $ Fail.fail ("Bad month " <> show m) option (Left m) . try $ Right <$> do _ <- char sep d <- decimal maybe (Fail.fail $ showBadDate y m d) return $ fromGregorianValid y m d return $ case emd of Nothing -> SmartAssumeStart y Nothing Just (Left m) -> SmartAssumeStart y (Just m) Just (Right day) -> SmartCompleteDate day md :: TextParser m (Month, MonthDay) md = do m <- decimal datesepchar d <- decimal _ <- failIfInvalidDate $ SmartFromReference (Just m) d return (m, d) -- | Parse a year number from a Text, making sure that at least four digits are -- used. yearp :: TextParser m Integer yearp = do year <- takeWhile1P (Just "year") isDigit unless (T.length year >= 4) . Fail.fail $ "Year must contain at least 4 digits: " <> T.unpack year return $ readDecimal year -- These are compared case insensitively, and should all be kept lower case. months = ["january","february","march","april","may","june", "july","august","september","october","november","december"] monthabbrevs = ["jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"] weekdays = ["monday","tuesday","wednesday","thursday","friday","saturday","sunday"] weekdayabbrevs = ["mon","tue","wed","thu","fri","sat","sun"] month, mon :: TextParser m Month month = choice $ zipWith (\i m -> i <$ string' m) [1..12] months mon = choice $ zipWith (\i m -> i <$ string' m) [1..12] monthabbrevs weekday :: TextParser m Int weekday = do wday <- T.toLower <$> (choice . map string' $ weekdays ++ weekdayabbrevs) case catMaybes $ [wday `elemIndex` weekdays, wday `elemIndex` weekdayabbrevs] of (i:_) -> return (i+1) [] -> Fail.fail $ "weekday: should not happen: attempted to find " <> show wday <> " in " <> show (weekdays ++ weekdayabbrevs) weekdaysp :: TextParser m [Int] weekdaysp = fmap head . group . sort <$> sepBy1 weekday (string' ",") -- | Parse a period expression, specifying a date span and optionally -- a reporting interval. Requires a reference "today" date for -- resolving any relative start/end dates (only; it is not needed for -- parsing the reporting interval). -- -- >>> let p = parsePeriodExpr (fromGregorian 2008 11 26) -- >>> p "from Aug to Oct" -- Right (NoInterval,DateSpan 2008-08-01..2008-09-30) -- >>> p "aug to oct" -- Right (NoInterval,DateSpan 2008-08-01..2008-09-30) -- >>> p "2009q2" -- Right (NoInterval,DateSpan 2009Q2) -- >>> p "Q3" -- Right (NoInterval,DateSpan 2008Q3) -- >>> p "every 3 days in Aug" -- Right (Days 3,DateSpan 2008-08) -- >>> p "daily from aug" -- Right (Days 1,DateSpan 2008-08-01..) 
-- >>> p "every week to 2009" -- Right (Weeks 1,DateSpan ..2008-12-31) -- >>> p "every 2nd day of month" -- Right (DayOfMonth 2,DateSpan ..) -- >>> p "every 2nd day" -- Right (DayOfMonth 2,DateSpan ..) -- >>> p "every 2nd day 2009.." -- Right (DayOfMonth 2,DateSpan 2009-01-01..) -- >>> p "every 2nd day 2009-" -- Right (DayOfMonth 2,DateSpan 2009-01-01..) -- >>> p "every 29th Nov" -- Right (DayOfYear 11 29,DateSpan ..) -- >>> p "every 29th nov ..2009" -- Right (DayOfYear 11 29,DateSpan ..2008-12-31) -- >>> p "every nov 29th" -- Right (DayOfYear 11 29,DateSpan ..) -- >>> p "every Nov 29th 2009.." -- Right (DayOfYear 11 29,DateSpan 2009-01-01..) -- >>> p "every 11/29 from 2009" -- Right (DayOfYear 11 29,DateSpan 2009-01-01..) -- >>> p "every 11/29 since 2009" -- Right (DayOfYear 11 29,DateSpan 2009-01-01..) -- >>> p "every 2nd Thursday of month to 2009" -- Right (WeekdayOfMonth 2 4,DateSpan ..2008-12-31) -- >>> p "every 1st monday of month to 2009" -- Right (WeekdayOfMonth 1 1,DateSpan ..2008-12-31) -- >>> p "every tue" -- Right (DaysOfWeek [2],DateSpan ..) -- >>> p "every 2nd day of week" -- Right (DaysOfWeek [2],DateSpan ..) -- >>> p "every 2nd day of month" -- Right (DayOfMonth 2,DateSpan ..) -- >>> p "every 2nd day" -- Right (DayOfMonth 2,DateSpan ..) -- >>> p "every 2nd day 2009.." -- Right (DayOfMonth 2,DateSpan 2009-01-01..) -- >>> p "every 2nd day of month 2009.." -- Right (DayOfMonth 2,DateSpan 2009-01-01..) periodexprp :: Day -> TextParser m (Interval, DateSpan) periodexprp rdate = do skipNonNewlineSpaces choice' [ intervalanddateperiodexprp rdate , (,) NoInterval <$> periodexprdatespanp rdate ] -- Parse a reporting interval and a date span. intervalanddateperiodexprp :: Day -> TextParser m (Interval, DateSpan) intervalanddateperiodexprp rdate = do i <- reportingintervalp s <- option def . try $ do skipNonNewlineSpaces periodexprdatespanp rdate return (i,s) -- Parse a reporting interval. reportingintervalp :: TextParser m Interval reportingintervalp = choice' [ tryinterval "day" "daily" Days , tryinterval "month" "monthly" Months , tryinterval "quarter" "quarterly" Quarters , tryinterval "year" "yearly" Years , Weeks 2 <$ string' "biweekly" , Weeks 2 <$ string' "fortnightly" , Months 2 <$ string' "bimonthly" , string' "every" *> skipNonNewlineSpaces *> choice' [ DaysOfWeek . pure <$> (nth <* skipNonNewlineSpaces <* string' "day" <* of_ "week") , DayOfMonth <$> (nth <* skipNonNewlineSpaces <* string' "day" <* optOf_ "month") , liftA2 WeekdayOfMonth nth $ skipNonNewlineSpaces *> weekday <* optOf_ "month" , uncurry DayOfYear <$> (md <* optOf_ "year") , DaysOfWeek <$> weekdaysp , DaysOfWeek [1..5] <$ string' "weekday" , DaysOfWeek [6..7] <$ string' "weekendday" , d_o_y <* optOf_ "year" ] -- NB: the ordering is important here since the parse for `every weekday` -- would match the `tryinterval` first and then error on `d`. Perhaps it -- would be clearer to factor some of this into the `every` choice or other -- left-factorings. , tryinterval "week" "weekly" Weeks ] where of_ period = skipNonNewlineSpaces *> string' "of" *> skipNonNewlineSpaces *> string' period optOf_ period = optional . try $ of_ period nth = decimal <* choice (map string' ["st","nd","rd","th"]) d_o_y = runPermutation $ liftA2 DayOfYear (toPermutation $ (month <|> mon) <* skipNonNewlineSpaces) (toPermutation $ nth <* skipNonNewlineSpaces) -- Parse any of several variants of a basic interval, eg "daily", "every day", "every N days". 
tryinterval :: Text -> Text -> (Int -> Interval) -> TextParser m Interval tryinterval singular compact intcons = intcons <$> choice' [ 1 <$ string' compact , string' "every" *> skipNonNewlineSpaces *> choice [ 1 <$ string' singular , decimal <* skipNonNewlineSpaces <* string' (singular <> "s") ] ] periodexprdatespanp :: Day -> TextParser m DateSpan periodexprdatespanp rdate = choice $ map try [ doubledatespanp rdate, quarterdatespanp rdate, fromdatespanp rdate, todatespanp rdate, justdatespanp rdate ] -- | -- >>> parsewith (doubledatespanp (fromGregorian 2018 01 01) <* eof) "20180101-201804" -- Right DateSpan 2018Q1 -- >>> parsewith (doubledatespanp (fromGregorian 2018 01 01) <* eof) "2017..2018" -- Right DateSpan 2017 -- >>> parsewith (doubledatespanp (fromGregorian 2018 01 01) <* eof) "2017-2018" -- Right DateSpan 2017 -- >>> parsewith (doubledatespanp (fromGregorian 2018 01 01) <* eof) "2017-01-2018" -- Right DateSpan 2017 -- >>> parsewith (doubledatespanp (fromGregorian 2018 01 01) <* eof) "2017-01-01-2018" -- Right DateSpan 2017 doubledatespanp :: Day -> TextParser m DateSpan doubledatespanp rdate = liftA2 fromToSpan (optional ((string' "from" <|> string' "since") *> skipNonNewlineSpaces) *> smartdate) (skipNonNewlineSpaces *> choice [string' "to", string "..", string "-"] *> skipNonNewlineSpaces *> smartdate) where fromToSpan = DateSpan `on` (Just . fixSmartDate rdate) -- | -- >>> parsewith (quarterdatespanp (fromGregorian 2018 01 01) <* eof) "q1" -- Right DateSpan 2018Q1 -- >>> parsewith (quarterdatespanp (fromGregorian 2018 01 01) <* eof) "Q1" -- Right DateSpan 2018Q1 -- >>> parsewith (quarterdatespanp (fromGregorian 2018 01 01) <* eof) "2020q4" -- Right DateSpan 2020Q4 quarterdatespanp :: Day -> TextParser m DateSpan quarterdatespanp rdate = do y <- yearp <|> pure (first3 $ toGregorian rdate) q <- char' 'q' *> satisfy is4Digit return . periodAsDateSpan $ QuarterPeriod y (digitToInt q) where is4Digit c = (fromIntegral (ord c - ord '1') :: Word) <= 3 fromdatespanp :: Day -> TextParser m DateSpan fromdatespanp rdate = fromSpan <$> choice [ (string' "from" <|> string' "since") *> skipNonNewlineSpaces *> smartdate , smartdate <* choice [string "..", string "-"] ] where fromSpan b = DateSpan (Just $ fixSmartDate rdate b) Nothing todatespanp :: Day -> TextParser m DateSpan todatespanp rdate = choice [string' "to", string' "until", string "..", string "-"] *> skipNonNewlineSpaces *> (DateSpan Nothing . Just . fixSmartDate rdate <$> smartdate) justdatespanp :: Day -> TextParser m DateSpan justdatespanp rdate = optional (string' "in" *> skipNonNewlineSpaces) *> (spanFromSmartDate rdate <$> smartdate) nulldatespan :: DateSpan nulldatespan = DateSpan Nothing Nothing -- | An exact datespan of zero length, that matches no date. 
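-- So, for any day d, spanContainsDate emptydatespan d should be False,
-- while spanContainsDate nulldatespan d is always True
-- (an illustrative note, not a doctest).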
emptydatespan :: DateSpan emptydatespan = DateSpan (Just $ Exact $ addDays 1 nulldate) (Just $ Exact nulldate) nulldate :: Day nulldate = fromGregorian 0 1 1 -- tests tests_Dates = testGroup "Dates" [ testCase "weekday" $ do splitSpan False (DaysOfWeek [1..5]) (DateSpan (Just $ Exact $ fromGregorian 2021 07 01) (Just $ Exact $ fromGregorian 2021 07 08)) @?= [ (DateSpan (Just $ Exact $ fromGregorian 2021 06 28) (Just $ Exact $ fromGregorian 2021 06 29)) , (DateSpan (Just $ Exact $ fromGregorian 2021 06 29) (Just $ Exact $ fromGregorian 2021 06 30)) , (DateSpan (Just $ Exact $ fromGregorian 2021 06 30) (Just $ Exact $ fromGregorian 2021 07 01)) , (DateSpan (Just $ Exact $ fromGregorian 2021 07 01) (Just $ Exact $ fromGregorian 2021 07 02)) , (DateSpan (Just $ Exact $ fromGregorian 2021 07 02) (Just $ Exact $ fromGregorian 2021 07 05)) -- next week , (DateSpan (Just $ Exact $ fromGregorian 2021 07 05) (Just $ Exact $ fromGregorian 2021 07 06)) , (DateSpan (Just $ Exact $ fromGregorian 2021 07 06) (Just $ Exact $ fromGregorian 2021 07 07)) , (DateSpan (Just $ Exact $ fromGregorian 2021 07 07) (Just $ Exact $ fromGregorian 2021 07 08)) ] splitSpan False (DaysOfWeek [1, 5]) (DateSpan (Just $ Exact $ fromGregorian 2021 07 01) (Just $ Exact $ fromGregorian 2021 07 08)) @?= [ (DateSpan (Just $ Exact $ fromGregorian 2021 06 28) (Just $ Exact $ fromGregorian 2021 07 02)) , (DateSpan (Just $ Exact $ fromGregorian 2021 07 02) (Just $ Exact $ fromGregorian 2021 07 05)) -- next week , (DateSpan (Just $ Exact $ fromGregorian 2021 07 05) (Just $ Exact $ fromGregorian 2021 07 09)) ] , testCase "match dayOfWeek" $ do let dayofweek n = splitspan (nthdayofweekcontaining n) (\w -> (if w == 0 then id else applyN (n-1) nextday . applyN (fromInteger w) nextweek)) 1 matchdow ds day = splitSpan False (DaysOfWeek [day]) ds @?= dayofweek day ds ys2021 = fromGregorian 2021 01 01 ye2021 = fromGregorian 2021 12 31 ys2022 = fromGregorian 2022 01 01 mapM_ (matchdow (DateSpan (Just $ Exact ys2021) (Just $ Exact ye2021))) [1..7] mapM_ (matchdow (DateSpan (Just $ Exact ys2021) (Just $ Exact ys2022))) [1..7] mapM_ (matchdow (DateSpan (Just $ Exact ye2021) (Just $ Exact ys2022))) [1..7] mapM_ (matchdow (DateSpan (Just $ Exact ye2021) Nothing)) [1..7] mapM_ (matchdow (DateSpan (Just $ Exact ys2022) Nothing)) [1..7] mapM_ (matchdow (DateSpan Nothing (Just $ Exact ye2021))) [1..7] mapM_ (matchdow (DateSpan Nothing (Just $ Exact ys2022))) [1..7] ] hledger-lib-1.30/Hledger/Data/Errors.hs0000644000000000000000000002201014434445206016035 0ustar0000000000000000{-| Helpers for making error messages. -} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} module Hledger.Data.Errors ( makeAccountTagErrorExcerpt, makeTransactionErrorExcerpt, makePostingErrorExcerpt, makePostingAccountErrorExcerpt, makeBalanceAssertionErrorExcerpt, transactionFindPostingIndex, ) where import Data.Function ((&)) import Data.List (find) import Data.Text (Text) import qualified Data.Text as T import Hledger.Data.Transaction (showTransaction) import Hledger.Data.Types import Hledger.Utils import Data.Maybe import Safe (headMay) import Hledger.Data.Posting (isVirtual) -- | Given an account name and its account directive, and a problem tag within the latter: -- render it as a megaparsec-style excerpt, showing the original line number and -- marked column or region. -- Returns the file path, line number, column(s) if known, -- and the rendered excerpt, or as much of these as is possible. 
-- The returned columns will be accurate for the rendered error message but not for the original journal data. makeAccountTagErrorExcerpt :: (AccountName, AccountDeclarationInfo) -> TagName -> (FilePath, Int, Maybe (Int, Maybe Int), Text) makeAccountTagErrorExcerpt (a, adi) _t = (f, l, merrcols, ex) -- XXX findtxnerrorcolumns is awkward, I don't think this is the final form where (SourcePos f pos _) = adisourcepos adi l = unPos pos txt = showAccountDirective (a, adi) & textChomp & (<>"\n") ex = decorateTagErrorExcerpt l merrcols txt -- Calculate columns which will help highlight the region in the excerpt -- (but won't exactly match the real data, so won't be shown in the main error line) merrcols = Nothing -- don't bother for now -- Just (col, Just col2) -- where -- col = undefined -- T.length (showTransactionLineFirstPart t') + 2 -- col2 = undefined -- col + T.length tagname - 1 showAccountDirective (a, AccountDeclarationInfo{..}) = "account " <> a <> (if not $ T.null adicomment then " ; " <> adicomment else "") -- | Add megaparsec-style left margin, line number, and optional column marker(s). decorateTagErrorExcerpt :: Int -> Maybe (Int, Maybe Int) -> Text -> Text decorateTagErrorExcerpt l mcols txt = T.unlines $ ls' <> colmarkerline <> map (lineprefix<>) ms where (ls,ms) = splitAt 1 $ T.lines txt ls' = map ((T.pack (show l) <> " | ") <>) ls colmarkerline = [lineprefix <> T.replicate (col-1) " " <> T.replicate regionw "^" | Just (col, mendcol) <- [mcols] , let regionw = maybe 1 (subtract col) mendcol + 1 ] lineprefix = T.replicate marginw " " <> "| " where marginw = length (show l) + 1 _showAccountDirective = undefined -- | Given a problem transaction and a function calculating the best -- column(s) for marking the error region: -- render it as a megaparsec-style excerpt, showing the original line number -- on the transaction line, and a column(s) marker. -- Returns the file path, line number, column(s) if known, -- and the rendered excerpt, or as much of these as is possible. -- The returned columns will be accurate for the rendered error message but not for the original journal data. makeTransactionErrorExcerpt :: Transaction -> (Transaction -> Maybe (Int, Maybe Int)) -> (FilePath, Int, Maybe (Int, Maybe Int), Text) makeTransactionErrorExcerpt t findtxnerrorcolumns = (f, tl, merrcols, ex) -- XXX findtxnerrorcolumns is awkward, I don't think this is the final form where (SourcePos f tpos _) = fst $ tsourcepos t tl = unPos tpos txntxt = showTransaction t & textChomp & (<>"\n") merrcols = findtxnerrorcolumns t ex = decorateTransactionErrorExcerpt tl merrcols txntxt -- | Add megaparsec-style left margin, line number, and optional column marker(s). decorateTransactionErrorExcerpt :: Int -> Maybe (Int, Maybe Int) -> Text -> Text decorateTransactionErrorExcerpt l mcols txt = T.unlines $ ls' <> colmarkerline <> map (lineprefix<>) ms where (ls,ms) = splitAt 1 $ T.lines txt ls' = map ((T.pack (show l) <> " | ") <>) ls colmarkerline = [lineprefix <> T.replicate (col-1) " " <> T.replicate regionw "^" | Just (col, mendcol) <- [mcols] , let regionw = maybe 1 (subtract col) mendcol + 1 ] lineprefix = T.replicate marginw " " <> "| " where marginw = length (show l) + 1 -- | Given a problem posting and a function calculating the best -- column(s) for marking the error region: -- look up error info from the parent transaction, and render the transaction -- as a megaparsec-style excerpt, showing the original line number -- on the problem posting's line, and a column indicator. 
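-- Roughly, the rendered excerpt looks like this (an illustrative sketch with
-- hypothetical journal content; the exact layout comes from
-- decoratePostingErrorExcerpt below, shown here with the posting's account
-- highlighted as makePostingAccountErrorExcerpt does):
--
--       | 2023-01-01 shopping
--     6 |     expenses:food        $10
--       |     ^^^^^^^^^^^^^
--       |     assets:cash
--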
-- Returns the file path, line number, column(s) if known, -- and the rendered excerpt, or as much of these as is possible. -- A limitation: columns will be accurate for the rendered error message but not for the original journal data. makePostingErrorExcerpt :: Posting -> (Posting -> Transaction -> Text -> Maybe (Int, Maybe Int)) -> (FilePath, Int, Maybe (Int, Maybe Int), Text) makePostingErrorExcerpt p findpostingerrorcolumns = case ptransaction p of Nothing -> ("-", 0, Nothing, "") Just t -> (f, errabsline, merrcols, ex) where (SourcePos f tl _) = fst $ tsourcepos t mpindex = transactionFindPostingIndex (==p) t errrelline = case mpindex of Nothing -> 0 Just pindex -> commentExtraLines (tcomment t) + sum (map postingLines $ take pindex $ tpostings t) where -- How many lines are used to render this posting ? postingLines p' = 1 + commentExtraLines (pcomment p') -- How many extra lines does this comment add to a transaction or posting rendering ? commentExtraLines c = max 0 (length (T.lines c) - 1) errabsline = unPos tl + errrelline txntxt = showTransaction t & textChomp & (<>"\n") merrcols = findpostingerrorcolumns p t txntxt ex = decoratePostingErrorExcerpt errabsline errrelline merrcols txntxt -- | Add megaparsec-style left margin, line number, and optional column marker(s). decoratePostingErrorExcerpt :: Int -> Int -> Maybe (Int, Maybe Int) -> Text -> Text decoratePostingErrorExcerpt absline relline mcols txt = T.unlines $ js' <> ks' <> colmarkerline <> ms' where (ls,ms) = splitAt (relline+1) $ T.lines txt (js,ks) = splitAt (length ls - 1) ls (js',ks') = case ks of [k] -> (map (lineprefix<>) js, [T.pack (show absline) <> " | " <> k]) _ -> ([], []) ms' = map (lineprefix<>) ms colmarkerline = [lineprefix <> T.replicate (col-1) " " <> T.replicate regionw "^" | Just (col, mendcol) <- [mcols] , let regionw = 1 + maybe 0 (subtract col) mendcol ] lineprefix = T.replicate marginw " " <> "| " where marginw = length (show absline) + 1 -- | Find the 1-based index of the first posting in this transaction -- satisfying the given predicate. transactionFindPostingIndex :: (Posting -> Bool) -> Transaction -> Maybe Int transactionFindPostingIndex ppredicate = fmap fst . find (ppredicate.snd) . zip [1..] . tpostings -- | From the given posting, make an error excerpt showing the transaction with -- this posting's account part highlighted. makePostingAccountErrorExcerpt :: Posting -> (FilePath, Int, Maybe (Int, Maybe Int), Text) makePostingAccountErrorExcerpt p = makePostingErrorExcerpt p finderrcols where -- Calculate columns suitable for highlighting the synthetic excerpt. finderrcols p' _ _ = Just (col, Just col2) where col = 5 + if isVirtual p' then 1 else 0 col2 = col + T.length (paccount p') - 1 -- | From the given posting, make an error excerpt showing the transaction with -- the balance assertion highlighted. makeBalanceAssertionErrorExcerpt :: Posting -> (FilePath, Int, Maybe (Int, Maybe Int), Text) makeBalanceAssertionErrorExcerpt p = makePostingErrorExcerpt p finderrcols where finderrcols p' t trendered = Just (col, Just col2) where -- Analyse the rendering to find the columns to highlight. tlines = dbg5 "tlines" $ max 1 $ length $ T.lines $ tcomment t -- transaction comment can generate extra lines (col, col2) = let def = (5, maximum (map T.length $ T.lines trendered)) -- fallback: underline whole posting. Shouldn't happen. 
in case transactionFindPostingIndex (==p') t of Nothing -> def Just idx -> fromMaybe def $ do let beforeps = take (idx-1) $ tpostings t beforepslines = dbg5 "beforepslines" $ sum $ map (max 1 . length . T.lines . pcomment) beforeps -- posting comment can generate extra lines (assume only one commodity shown) assertionline <- dbg5 "assertionline" $ headMay $ drop (tlines + beforepslines) $ T.lines trendered let col2' = T.length assertionline l = dropWhile (/= '=') $ reverse $ T.unpack assertionline l' = dropWhile (`elem` ['=','*']) l col' = length l' + 1 return (col', col2') hledger-lib-1.30/Hledger/Data/Journal.hs0000644000000000000000000016076214434476632016223 0ustar0000000000000000{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE Rank2Types #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE RecordWildCards #-} {-| A 'Journal' is a set of transactions, plus optional related data. This is hledger's primary data object. It is usually parsed from a journal file or other data format (see "Hledger.Read"). -} module Hledger.Data.Journal ( -- * Parsing helpers JournalParser, ErroringJournalParser, addPriceDirective, addTransactionModifier, addPeriodicTransaction, addTransaction, journalInferMarketPricesFromTransactions, journalApplyCommodityStyles, commodityStylesFromAmounts, journalCommodityStyles, journalToCost, journalAddInferredEquityPostings, journalInferCostsFromEquity, journalMarkRedundantCosts, journalReverse, journalSetLastReadTime, journalRenumberAccountDeclarations, journalPivot, -- * Filtering filterJournalTransactions, filterJournalPostings, filterJournalRelatedPostings, filterJournalAmounts, filterTransactionAmounts, filterTransactionPostings, filterTransactionPostingsExtra, filterTransactionRelatedPostings, filterPostingAmount, -- * Mapping journalMapTransactions, journalMapPostings, journalMapPostingAmounts, -- * Querying journalAccountNamesUsed, journalAccountNamesImplied, journalAccountNamesDeclared, journalAccountNamesDeclaredOrUsed, journalAccountNamesDeclaredOrImplied, journalLeafAccountNamesDeclared, journalAccountNames, journalLeafAccountNames, journalAccountNameTree, journalAccountTags, journalInheritedAccountTags, -- journalAmountAndPriceCommodities, -- journalAmountStyles, -- overJournalAmounts, -- traverseJournalAmounts, -- journalCanonicalCommodities, journalPayeesDeclared, journalPayeesUsed, journalPayeesDeclaredOrUsed, journalTagsDeclared, journalTagsUsed, journalTagsDeclaredOrUsed, journalCommoditiesDeclared, journalCommodities, journalDateSpan, journalDateSpanBothDates, journalStartDate, journalEndDate, journalLastDay, journalDescriptions, journalFilePath, journalFilePaths, journalTransactionAt, journalNextTransaction, journalPrevTransaction, journalPostings, journalTransactionsSimilarTo, -- * Account types journalAccountType, journalAccountTypes, journalAddAccountTypes, journalPostingsAddAccountTags, -- journalPrices, journalConversionAccount, -- * Misc canonicalStyleFrom, nulljournal, journalConcat, journalNumberTransactions, journalNumberAndTieTransactions, journalUntieTransactions, journalModifyTransactions, journalApplyAliases, dbgJournalAcctDeclOrder, -- * Tests samplejournal, samplejournalMaybeExplicit, tests_Journal -- ) where import Control.Applicative ((<|>)) import Control.Monad.Except (ExceptT(..)) import Control.Monad.State.Strict (StateT) import Data.Char (toUpper, isDigit) import Data.Default (Default(..)) import Data.Foldable (toList) import Data.List ((\\), find, foldl', sortBy, 
union, intercalate) import Data.List.Extra (nubSort) import qualified Data.Map.Strict as M import Data.Maybe (catMaybes, fromMaybe, mapMaybe, maybeToList) import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import Safe (headMay, headDef, maximumMay, minimumMay, lastDef) import Data.Time.Calendar (Day, addDays, fromGregorian, diffDays) import Data.Time.Clock.POSIX (POSIXTime) import Data.Tree (Tree(..), flatten) import Text.Printf (printf) import Text.Megaparsec (ParsecT) import Text.Megaparsec.Custom (FinalParseError) import Hledger.Utils import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount import Hledger.Data.Posting import Hledger.Data.Transaction import Hledger.Data.TransactionModifier import Hledger.Data.Valuation import Hledger.Query import System.FilePath (takeFileName) import Data.Ord (comparing) import Hledger.Data.Dates (nulldate) import Data.List (sort) -- | A parser of text that runs in some monad, keeping a Journal as state. type JournalParser m a = StateT Journal (ParsecT HledgerParseErrorData Text m) a -- | A parser of text that runs in some monad, keeping a Journal as -- state, that can throw an exception to end parsing, preventing -- further parser backtracking. type ErroringJournalParser m a = StateT Journal (ParsecT HledgerParseErrorData Text (ExceptT FinalParseError m)) a -- deriving instance Show Journal instance Show Journal where show j | debugLevel < 3 = printf "Journal %s with %d transactions, %d accounts" (journalFilePath j) (length $ jtxns j) (length accounts) | debugLevel < 6 = printf "Journal %s with %d transactions, %d accounts: %s" (journalFilePath j) (length $ jtxns j) (length accounts) (show accounts) | otherwise = printf "Journal %s with %d transactions, %d accounts: %s, commodity styles: %s" (journalFilePath j) (length $ jtxns j) (length accounts) (show accounts) (show $ jinferredcommodities j) -- ++ (show $ journalTransactions l) where accounts = filter (/= "root") $ flatten $ journalAccountNameTree j -- showJournalDebug j = unlines [ -- show j -- ,show (jtxns j) -- ,show (jtxnmodifiers j) -- ,show (jperiodictxns j) -- ,show $ jparsetimeclockentries j -- ,show $ jpricedirectives j -- ,show $ jfinalcommentlines j -- ,show $ jparsestate j -- ,show $ map fst $ jfiles j -- ] -- The semigroup instance for Journal is useful for two situations. -- -- 1. concatenating finalised journals, eg with multiple -f options: -- FIRST <> SECOND. -- -- 2. merging a child parsed journal, eg with the include directive: -- CHILD <> PARENT. A parsed journal's data is in reverse order, so -- this gives what we want. -- -- Note that (<>) is right-biased, so nulljournal is only a left identity. -- In particular, this prevents Journal from being a monoid. instance Semigroup Journal where j1 <> j2 = j1 `journalConcat` j2 -- | Merge two journals into one. -- Transaction counts are summed, map fields are combined, -- the second's list fields are appended to the first's, -- the second's parse state is kept. 
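-- A hedged usage sketch (illustrative): this is what the Semigroup instance
-- above delegates to, eg
--
--   combined = first `journalConcat` second     -- ie first <> second
--
-- keeping the second journal's parse state, appending its list fields to the
-- first's, and renumbering account declarations across both files.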
journalConcat :: Journal -> Journal -> Journal journalConcat j1 j2 = let f1 = takeFileName $ journalFilePath j1 f2 = maybe "(unknown)" takeFileName $ headMay $ jincludefilestack j2 -- XXX more accurate than journalFilePath for some reason in dbgJournalAcctDeclOrder ("journalConcat: " <> f1 <> " <> " <> f2 <> ", acct decls renumbered: ") $ journalRenumberAccountDeclarations $ dbgJournalAcctDeclOrder ("journalConcat: " <> f1 <> " <> " <> f2 <> ", acct decls : ") $ Journal { jparsedefaultyear = jparsedefaultyear j2 ,jparsedefaultcommodity = jparsedefaultcommodity j2 ,jparsedecimalmark = jparsedecimalmark j2 ,jparseparentaccounts = jparseparentaccounts j2 ,jparsealiases = jparsealiases j2 -- ,jparsetransactioncount = jparsetransactioncount j1 + jparsetransactioncount j2 ,jparsetimeclockentries = jparsetimeclockentries j1 <> jparsetimeclockentries j2 ,jincludefilestack = jincludefilestack j2 ,jdeclaredpayees = jdeclaredpayees j1 <> jdeclaredpayees j2 ,jdeclaredtags = jdeclaredtags j1 <> jdeclaredtags j2 ,jdeclaredaccounts = jdeclaredaccounts j1 <> jdeclaredaccounts j2 ,jdeclaredaccounttags = jdeclaredaccounttags j1 <> jdeclaredaccounttags j2 ,jdeclaredaccounttypes = jdeclaredaccounttypes j1 <> jdeclaredaccounttypes j2 ,jaccounttypes = jaccounttypes j1 <> jaccounttypes j2 ,jglobalcommoditystyles = jglobalcommoditystyles j1 <> jglobalcommoditystyles j2 ,jcommodities = jcommodities j1 <> jcommodities j2 ,jinferredcommodities = jinferredcommodities j1 <> jinferredcommodities j2 ,jpricedirectives = jpricedirectives j1 <> jpricedirectives j2 ,jinferredmarketprices = jinferredmarketprices j1 <> jinferredmarketprices j2 ,jtxnmodifiers = jtxnmodifiers j1 <> jtxnmodifiers j2 ,jperiodictxns = jperiodictxns j1 <> jperiodictxns j2 ,jtxns = jtxns j1 <> jtxns j2 ,jfinalcommentlines = jfinalcommentlines j2 -- XXX discards j1's ? ,jfiles = jfiles j1 <> jfiles j2 ,jlastreadtime = max (jlastreadtime j1) (jlastreadtime j2) } -- | Renumber all the account declarations. This is useful to call when -- finalising or concatenating Journals, to give account declarations -- a total order across files. journalRenumberAccountDeclarations :: Journal -> Journal journalRenumberAccountDeclarations j = j{jdeclaredaccounts=jdas'} where jdas' = [(a, adi{adideclarationorder=n}) | (n, (a,adi)) <- zip [1..] $ jdeclaredaccounts j] -- the per-file declaration order saved during parsing is discarded, -- it seems unneeded except perhaps for debugging -- | Debug log the ordering of a journal's account declarations -- (at debug level 5+). dbgJournalAcctDeclOrder :: String -> Journal -> Journal dbgJournalAcctDeclOrder prefix = traceOrLogAtWith 5 ((prefix++) . showAcctDeclsSummary . jdeclaredaccounts) where showAcctDeclsSummary :: [(AccountName,AccountDeclarationInfo)] -> String showAcctDeclsSummary adis | length adis < (2*n+2) = "[" <> showadis adis <> "]" | otherwise = "[" <> showadis (take n adis) <> " ... " <> showadis (takelast n adis) <> "]" where n = 3 showadis = intercalate ", " . map showadi showadi (a,adi) = "("<>show (adideclarationorder adi)<>","<>T.unpack a<>")" takelast n' = reverse . take n' . 
reverse instance Default Journal where def = nulljournal nulljournal :: Journal nulljournal = Journal { jparsedefaultyear = Nothing ,jparsedefaultcommodity = Nothing ,jparsedecimalmark = Nothing ,jparseparentaccounts = [] ,jparsealiases = [] -- ,jparsetransactioncount = 0 ,jparsetimeclockentries = [] ,jincludefilestack = [] ,jdeclaredpayees = [] ,jdeclaredtags = [] ,jdeclaredaccounts = [] ,jdeclaredaccounttags = M.empty ,jdeclaredaccounttypes = M.empty ,jaccounttypes = M.empty ,jglobalcommoditystyles = M.empty ,jcommodities = M.empty ,jinferredcommodities = M.empty ,jpricedirectives = [] ,jinferredmarketprices = [] ,jtxnmodifiers = [] ,jperiodictxns = [] ,jtxns = [] ,jfinalcommentlines = "" ,jfiles = [] ,jlastreadtime = 0 } journalFilePath :: Journal -> FilePath journalFilePath = fst . mainfile journalFilePaths :: Journal -> [FilePath] journalFilePaths = map fst . jfiles mainfile :: Journal -> (FilePath, Text) mainfile = headDef ("(unknown)", "") . jfiles addTransaction :: Transaction -> Journal -> Journal addTransaction t j = j { jtxns = t : jtxns j } addTransactionModifier :: TransactionModifier -> Journal -> Journal addTransactionModifier mt j = j { jtxnmodifiers = mt : jtxnmodifiers j } addPeriodicTransaction :: PeriodicTransaction -> Journal -> Journal addPeriodicTransaction pt j = j { jperiodictxns = pt : jperiodictxns j } addPriceDirective :: PriceDirective -> Journal -> Journal addPriceDirective h j = j { jpricedirectives = h : jpricedirectives j } -- XXX #999 keep sorted -- | Get the transaction with this index (its 1-based position in the input stream), if any. journalTransactionAt :: Journal -> Integer -> Maybe Transaction journalTransactionAt Journal{jtxns=ts} i = -- it's probably ts !! (i+1), but we won't assume headMay [t | t <- ts, tindex t == i] -- | Get the transaction that appeared immediately after this one in the input stream, if any. journalNextTransaction :: Journal -> Transaction -> Maybe Transaction journalNextTransaction j t = journalTransactionAt j (tindex t + 1) -- | Get the transaction that appeared immediately before this one in the input stream, if any. journalPrevTransaction :: Journal -> Transaction -> Maybe Transaction journalPrevTransaction j t = journalTransactionAt j (tindex t - 1) -- | All postings from this journal's transactions, in order. journalPostings :: Journal -> [Posting] journalPostings = concatMap tpostings . jtxns -- | Sorted unique commodity symbols declared by commodity directives in this journal. journalCommoditiesDeclared :: Journal -> [CommoditySymbol] journalCommoditiesDeclared = M.keys . jcommodities -- | Sorted unique commodity symbols declared or inferred from this journal. journalCommodities :: Journal -> S.Set CommoditySymbol journalCommodities j = M.keysSet (jcommodities j) <> M.keysSet (jinferredcommodities j) -- | Unique transaction descriptions used in this journal. journalDescriptions :: Journal -> [Text] journalDescriptions = nubSort . map tdescription . jtxns -- | Sorted unique payees declared by payee directives in this journal. journalPayeesDeclared :: Journal -> [Payee] journalPayeesDeclared = nubSort . map fst . jdeclaredpayees -- | Sorted unique payees used by transactions in this journal. journalPayeesUsed :: Journal -> [Payee] journalPayeesUsed = nubSort . map transactionPayee . jtxns -- | Sorted unique payees used in transactions or declared by payee directives in this journal. 
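-- Roughly speaking (a sketch of the intent only; the implementation below
-- folds both lists into a Set):
--
--   journalPayeesDeclaredOrUsed j == nubSort (journalPayeesDeclared j ++ journalPayeesUsed j)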
journalPayeesDeclaredOrUsed :: Journal -> [Payee] journalPayeesDeclaredOrUsed j = toList $ foldMap S.fromList [journalPayeesDeclared j, journalPayeesUsed j] -- | Sorted unique tag names declared by tag directives in this journal. journalTagsDeclared :: Journal -> [TagName] journalTagsDeclared = nubSort . map fst . jdeclaredtags -- | Sorted unique tag names used in this journal (in account directives, transactions, postings..) journalTagsUsed :: Journal -> [TagName] journalTagsUsed j = nubSort $ map fst $ concatMap transactionAllTags $ jtxns j -- tags used in all transactions and postings and postings' accounts -- | Sorted unique tag names used in transactions or declared by tag directives in this journal. journalTagsDeclaredOrUsed :: Journal -> [TagName] journalTagsDeclaredOrUsed j = toList $ foldMap S.fromList [journalTagsDeclared j, journalTagsUsed j] -- | Sorted unique account names posted to by this journal's transactions. journalAccountNamesUsed :: Journal -> [AccountName] journalAccountNamesUsed = accountNamesFromPostings . journalPostings -- | Sorted unique account names implied by this journal's transactions - -- accounts posted to and all their implied parent accounts. journalAccountNamesImplied :: Journal -> [AccountName] journalAccountNamesImplied = expandAccountNames . journalAccountNamesUsed -- | Sorted unique account names declared by account directives in this journal. journalAccountNamesDeclared :: Journal -> [AccountName] journalAccountNamesDeclared = nubSort . map fst . jdeclaredaccounts -- | Sorted unique account names declared by account directives in this journal, -- which have no children. journalLeafAccountNamesDeclared :: Journal -> [AccountName] journalLeafAccountNamesDeclared = treeLeaves . accountNameTreeFrom . journalAccountNamesDeclared -- | Sorted unique account names declared by account directives or posted to -- by transactions in this journal. journalAccountNamesDeclaredOrUsed :: Journal -> [AccountName] journalAccountNamesDeclaredOrUsed j = toList $ foldMap S.fromList [journalAccountNamesDeclared j, journalAccountNamesUsed j] -- | Sorted unique account names declared by account directives, or posted to -- or implied as parents by transactions in this journal. journalAccountNamesDeclaredOrImplied :: Journal -> [AccountName] journalAccountNamesDeclaredOrImplied j = toList $ foldMap S.fromList [journalAccountNamesDeclared j, expandAccountNames $ journalAccountNamesUsed j] -- | Convenience/compatibility alias for journalAccountNamesDeclaredOrImplied. journalAccountNames :: Journal -> [AccountName] journalAccountNames = journalAccountNamesDeclaredOrImplied -- | Sorted unique account names declared or implied in this journal -- which have no children. journalLeafAccountNames :: Journal -> [AccountName] journalLeafAccountNames = treeLeaves . journalAccountNameTree journalAccountNameTree :: Journal -> Tree AccountName journalAccountNameTree = accountNameTreeFrom . journalAccountNamesDeclaredOrImplied -- | Which tags have been declared explicitly for this account, if any ? journalAccountTags :: Journal -> AccountName -> [Tag] journalAccountTags Journal{jdeclaredaccounttags} a = M.findWithDefault [] a jdeclaredaccounttags -- | Which tags are in effect for this account, including tags inherited from parent accounts ? journalInheritedAccountTags :: Journal -> AccountName -> [Tag] journalInheritedAccountTags j a = foldl' (\ts a' -> ts `union` journalAccountTags j a') [] as where as = a : parentAccountNames a -- PERF: cache in journal ? 
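-- A small sketch of tag inheritance (hypothetical account directives):
--
--   account assets            ; location:home
--   account assets:bank       ; bank:
--
-- Here journalAccountTags j "assets:bank" would return just the bank: tag,
-- while journalInheritedAccountTags j "assets:bank" would also include
-- location:home, inherited from the parent account.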
type DateWeightedSimilarityScore = Double type SimilarityScore = Double type Age = Integer -- | Find up to N most similar and most recent transactions matching -- the given transaction description and query and exceeding the given -- description similarity score (0 to 1, see compareDescriptions). -- Returns transactions along with -- their age in days compared to the latest transaction date, -- their description similarity score, -- and a heuristically date-weighted variant of this that favours more recent transactions. journalTransactionsSimilarTo :: Journal -> Text -> Query -> SimilarityScore -> Int -> [(DateWeightedSimilarityScore, Age, SimilarityScore, Transaction)] journalTransactionsSimilarTo Journal{jtxns} desc q similaritythreshold n = take n $ dbg1With ( unlines . ("up to 30 transactions above description similarity threshold "<>show similaritythreshold<>" ordered by recency-weighted similarity:":) . take 30 . map ( \(w,a,s,Transaction{..}) -> printf "weighted:%8.3f age:%4d similarity:%5.3f %s %s" w a s (show tdate) tdescription )) $ sortBy (comparing (negate.first4)) $ map (\(s,t) -> (weightedScore (s,t), age t, s, t)) $ filter ((> similaritythreshold).fst) [(compareDescriptions desc $ tdescription t, t) | t <- jtxns, q `matchesTransaction` t] where latest = lastDef nulldate $ sort $ map tdate jtxns age = diffDays latest . tdate -- Combine similarity and recency heuristically. This gave decent results -- in my "find most recent invoice" use case in 2023-03, -- but will probably need more attention. weightedScore :: (Double, Transaction) -> Double weightedScore (s, t) = 100 * s - fromIntegral (age t) / 4 -- | Return a similarity score from 0 to 1.5 for two transaction descriptions. -- This is based on compareStrings, with the following modifications: -- -- - numbers are stripped out before measuring similarity -- -- - if the (unstripped) first description appears in its entirety within the second, -- the score is boosted by 0.5. -- compareDescriptions :: Text -> Text -> Double compareDescriptions a b = (if a `T.isInfixOf` b then (0.5+) else id) $ compareStrings (simplify a) (simplify b) where simplify = T.unpack . T.filter (not.isDigit) -- | Return a similarity score from 0 to 1 for two strings. This -- was based on Simon White's string similarity algorithm -- (http://www.catalysoft.com/articles/StrikeAMatch.html), later found -- to be https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient, -- and modified to handle short strings better. -- Todo: check out http://nlp.fi.muni.cz/raslan/2008/raslan08.pdf#page=14 . compareStrings :: String -> String -> Double compareStrings "" "" = 1 compareStrings [_] "" = 0 compareStrings "" [_] = 0 compareStrings [a] [b] = if toUpper a == toUpper b then 1 else 0 compareStrings s1 s2 = 2 * commonpairs / totalpairs where pairs1 = S.fromList $ wordLetterPairs $ uppercase s1 pairs2 = S.fromList $ wordLetterPairs $ uppercase s2 commonpairs = fromIntegral $ S.size $ S.intersection pairs1 pairs2 totalpairs = fromIntegral $ S.size pairs1 + S.size pairs2 wordLetterPairs :: String -> [String] wordLetterPairs = concatMap letterPairs . words letterPairs :: String -> [String] letterPairs (a:b:rest) = [a,b] : letterPairs (b:rest) letterPairs _ = [] -- | The 'AccountName' to use for automatically generated conversion postings. journalConversionAccount :: Journal -> AccountName journalConversionAccount = headDef (T.pack "equity:conversion") . M.findWithDefault [] Conversion . jdeclaredaccounttypes -- Newer account type code. 
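-- A rough sketch of how account types are resolved by the code below
-- (hypothetical journal with one declaration):
--
--   account assets:bank   ; type:A
--
--   journalAccountType j "assets:bank"          -- the declared type (an asset)
--   journalAccountType j "assets:bank:savings"  -- inherited from the declared parent
--   journalAccountType j "equity:conversion"    -- inferred from the account name, if possible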
journalAccountType :: Journal -> AccountName -> Maybe AccountType journalAccountType Journal{jaccounttypes} = accountNameType jaccounttypes -- | Add a map of all known account types to the journal. journalAddAccountTypes :: Journal -> Journal journalAddAccountTypes j = j{jaccounttypes = journalAccountTypes j} -- | Build a map of all known account types, explicitly declared -- or inferred from the account's parent or name. journalAccountTypes :: Journal -> M.Map AccountName AccountType journalAccountTypes j = M.fromList [(a,acctType) | (a, Just (acctType,_)) <- flatten t'] where t = accountNameTreeFrom $ journalAccountNames j :: Tree AccountName -- Map from the top of the account tree down to the leaves, propagating -- account types downward. Keep track of whether the account is declared -- (True), in which case the parent account should be preferred, or merely -- inferred (False), in which case the inferred type should be preferred. t' = settypes Nothing t :: Tree (AccountName, Maybe (AccountType, Bool)) where settypes :: Maybe (AccountType, Bool) -> Tree AccountName -> Tree (AccountName, Maybe (AccountType, Bool)) settypes mparenttype (Node a subs) = Node (a, mtype) (map (settypes mtype) subs) where mtype = M.lookup a declaredtypes <|> minferred where declaredtypes = (,True) <$> journalDeclaredAccountTypes j minferred = if maybe False snd mparenttype then mparenttype else (,False) <$> accountNameInferType a <|> mparenttype -- | Build a map of the account types explicitly declared. journalDeclaredAccountTypes :: Journal -> M.Map AccountName AccountType journalDeclaredAccountTypes Journal{jdeclaredaccounttypes} = M.fromList $ concat [map (,t) as | (t,as) <- M.toList jdeclaredaccounttypes] -- | To all postings in the journal, add any tags from their account -- (including those inherited from parent accounts). -- If the same tag exists on posting and account, the latter is ignored. journalPostingsAddAccountTags :: Journal -> Journal journalPostingsAddAccountTags j = journalMapPostings addtags j where addtags p = p `postingAddTags` (journalInheritedAccountTags j $ paccount p) -- Various kinds of filtering on journals. We do it differently depending -- on the command. ------------------------------------------------------------------------------- -- filtering V2 -- | Keep only transactions matching the query expression. filterJournalTransactions :: Query -> Journal -> Journal filterJournalTransactions q j@Journal{jtxns} = j{jtxns=filter (matchesTransactionExtra (journalAccountType j) q) jtxns} -- | Keep only postings matching the query expression. -- This can leave unbalanced transactions. filterJournalPostings :: Query -> Journal -> Journal filterJournalPostings q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionPostingsExtra (journalAccountType j) q) ts} -- | Keep only postings which do not match the query expression, but for which a related posting does. -- This can leave unbalanced transactions. filterJournalRelatedPostings :: Query -> Journal -> Journal filterJournalRelatedPostings q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionRelatedPostings q) ts} -- | Within each posting's amount, keep only the parts matching the query, and -- remove any postings with all amounts removed. -- This can leave unbalanced transactions. 
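-- A small usage sketch (assuming some query q built with Hledger.Query):
--
--   filterJournalTransactions q j   -- keep whole transactions matching q
--   filterJournalPostings q j       -- keep only matching postings (may unbalance txns)
--   filterJournalAmounts q j        -- keep only matching parts of posting amounts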
filterJournalAmounts :: Query -> Journal -> Journal filterJournalAmounts q j@Journal{jtxns=ts} = j{jtxns=map (filterTransactionAmounts q) ts} -- | Filter out all parts of this transaction's amounts which do not match the -- query, and remove any postings with all amounts removed. -- This can leave the transaction unbalanced. filterTransactionAmounts :: Query -> Transaction -> Transaction filterTransactionAmounts q t@Transaction{tpostings=ps} = t{tpostings=mapMaybe (filterPostingAmount q) ps} -- | Filter out all parts of this posting's amount which do not match the query, and remove the posting -- if this removes all amounts. filterPostingAmount :: Query -> Posting -> Maybe Posting filterPostingAmount q p@Posting{pamount=as} | null newamt = Nothing | otherwise = Just p{pamount=Mixed newamt} where Mixed newamt = filterMixedAmount (q `matchesAmount`) as filterTransactionPostings :: Query -> Transaction -> Transaction filterTransactionPostings q t@Transaction{tpostings=ps} = t{tpostings=filter (q `matchesPosting`) ps} -- Like filterTransactionPostings, but is given the map of account types so can also filter by account type. filterTransactionPostingsExtra :: (AccountName -> Maybe AccountType) -> Query -> Transaction -> Transaction filterTransactionPostingsExtra atypes q t@Transaction{tpostings=ps} = t{tpostings=filter (matchesPostingExtra atypes q) ps} filterTransactionRelatedPostings :: Query -> Transaction -> Transaction filterTransactionRelatedPostings q t@Transaction{tpostings=ps} = t{tpostings=if null matches then [] else ps \\ matches} where matches = filter (matchesPosting q) ps -- | Apply a transformation to a journal's transactions. journalMapTransactions :: (Transaction -> Transaction) -> Journal -> Journal journalMapTransactions f j@Journal{jtxns=ts} = j{jtxns=map f ts} -- | Apply a transformation to a journal's postings. journalMapPostings :: (Posting -> Posting) -> Journal -> Journal journalMapPostings f j@Journal{jtxns=ts} = j{jtxns=map (transactionMapPostings f) ts} -- | Apply a transformation to a journal's posting amounts. journalMapPostingAmounts :: (MixedAmount -> MixedAmount) -> Journal -> Journal journalMapPostingAmounts f = journalMapPostings (postingTransformAmount f) {- ------------------------------------------------------------------------------- -- filtering V1 -- | Keep only transactions we are interested in, as described by the -- filter specification. filterJournalTransactions :: FilterSpec -> Journal -> Journal filterJournalTransactions FilterSpec{datespan=datespan ,cleared=cleared -- ,real=real -- ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalTransactionsByStatus cleared . filterJournalPostingsByDepth depth . filterJournalTransactionsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . filterJournalTransactionsByDate datespan -- | Keep only postings we are interested in, as described by the filter -- specification. This can leave unbalanced transactions. filterJournalPostings :: FilterSpec -> Journal -> Journal filterJournalPostings FilterSpec{datespan=datespan ,cleared=cleared ,real=real ,empty=empty ,acctpats=apats ,descpats=dpats ,depth=depth ,fMetadata=md } = filterJournalPostingsByRealness real . filterJournalPostingsByStatus cleared . filterJournalPostingsByEmpty empty . filterJournalPostingsByDepth depth . filterJournalPostingsByAccount apats . filterJournalTransactionsByMetadata md . filterJournalTransactionsByDescription dpats . 
filterJournalTransactionsByDate datespan -- | Keep only transactions whose metadata matches all metadata specifications. filterJournalTransactionsByMetadata :: [(String,String)] -> Journal -> Journal filterJournalTransactionsByMetadata pats j@Journal{jtxns=ts} = j{jtxns=filter matchmd ts} where matchmd t = all (`elem` tmetadata t) pats -- | Keep only transactions whose description matches the description patterns. filterJournalTransactionsByDescription :: [String] -> Journal -> Journal filterJournalTransactionsByDescription pats j@Journal{jtxns=ts} = j{jtxns=filter matchdesc ts} where matchdesc = matchpats pats . tdescription -- | Keep only transactions which fall between begin and end dates. -- We include transactions on the begin date and exclude transactions on the end -- date, like ledger. An empty date string means no restriction. filterJournalTransactionsByDate :: DateSpan -> Journal -> Journal filterJournalTransactionsByDate (DateSpan begin end) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match t = maybe True (tdate t>=) begin && maybe True (tdate t<) end -- | Keep only transactions which have the requested cleared/uncleared -- status, if there is one. filterJournalTransactionsByStatus :: Maybe Bool -> Journal -> Journal filterJournalTransactionsByStatus Nothing j = j filterJournalTransactionsByStatus (Just val) j@Journal{jtxns=ts} = j{jtxns=filter match ts} where match = (==val).tstatus -- | Keep only postings which have the requested cleared/uncleared status, -- if there is one. filterJournalPostingsByStatus :: Maybe Bool -> Journal -> Journal filterJournalPostingsByStatus Nothing j = j filterJournalPostingsByStatus (Just c) j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter ((==c) . postingCleared) ps} -- | Strip out any virtual postings, if the flag is true, otherwise do -- no filtering. filterJournalPostingsByRealness :: Bool -> Journal -> Journal filterJournalPostingsByRealness False j = j filterJournalPostingsByRealness True j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter isReal ps} -- | Strip out any postings with zero amount, unless the flag is true. filterJournalPostingsByEmpty :: Bool -> Journal -> Journal filterJournalPostingsByEmpty True j = j filterJournalPostingsByEmpty False j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (not . isEmptyPosting) ps} -- -- | Keep only transactions which affect accounts deeper than the specified depth. -- filterJournalTransactionsByDepth :: Maybe Int -> Journal -> Journal -- filterJournalTransactionsByDepth Nothing j = j -- filterJournalTransactionsByDepth (Just d) j@Journal{jtxns=ts} = -- j{jtxns=(filter (any ((<= d+1) . accountNameLevel . paccount) . tpostings) ts)} -- | Strip out any postings to accounts deeper than the specified depth -- (and any transactions which have no postings as a result). filterJournalPostingsByDepth :: Maybe Int -> Journal -> Journal filterJournalPostingsByDepth Nothing j = j filterJournalPostingsByDepth (Just d) j@Journal{jtxns=ts} = j{jtxns=filter (not . null . tpostings) $ map filtertxns ts} where filtertxns t@Transaction{tpostings=ps} = t{tpostings=filter ((<= d) . accountNameLevel . paccount) ps} -- | Keep only postings which affect accounts matched by the account patterns. -- This can leave transactions unbalanced. 
filterJournalPostingsByAccount :: [String] -> Journal -> Journal filterJournalPostingsByAccount apats j@Journal{jtxns=ts} = j{jtxns=map filterpostings ts} where filterpostings t@Transaction{tpostings=ps} = t{tpostings=filter (matchpats apats . paccount) ps} -- | Keep only transactions which affect accounts matched by the account patterns. -- More precisely: each positive account pattern excludes transactions -- which do not contain a posting to a matched account, and each negative -- account pattern excludes transactions containing a posting to a matched -- account. filterJournalTransactionsByAccount :: [String] -> Journal -> Journal filterJournalTransactionsByAccount apats j@Journal{jtxns=ts} = j{jtxns=filter tmatch ts} where tmatch t = (null positives || any positivepmatch ps) && (null negatives || not (any negativepmatch ps)) where ps = tpostings t positivepmatch p = any (`amatch` a) positives where a = paccount p negativepmatch p = any (`amatch` a) negatives where a = paccount p amatch pat a = regexMatchesCI (abspat pat) a (negatives,positives) = partition isnegativepat apats -} -- | Reverse all lists of parsed items, which during parsing were -- prepended to, so that the items are in parse order. Part of -- post-parse finalisation. journalReverse :: Journal -> Journal journalReverse j = j {jfiles = reverse $ jfiles j ,jdeclaredaccounts = reverse $ jdeclaredaccounts j ,jtxns = reverse $ jtxns j ,jtxnmodifiers = reverse $ jtxnmodifiers j ,jperiodictxns = reverse $ jperiodictxns j ,jpricedirectives = reverse $ jpricedirectives j } -- | Set this journal's last read time, ie when its files were last read. journalSetLastReadTime :: POSIXTime -> Journal -> Journal journalSetLastReadTime t j = j{ jlastreadtime = t } journalNumberAndTieTransactions = journalTieTransactions . journalNumberTransactions -- | Number (set the tindex field) this journal's transactions, counting upward from 1. journalNumberTransactions :: Journal -> Journal journalNumberTransactions j@Journal{jtxns=ts} = j{jtxns=zipWith (\i t -> t{tindex=i}) [1..] ts} -- | Tie the knot in all of this journal's transactions, ensuring their postings -- refer to them. This should be done last, after any other transaction-modifying operations. journalTieTransactions :: Journal -> Journal journalTieTransactions j@Journal{jtxns=ts} = j{jtxns=map txnTieKnot ts} -- | Untie all transaction-posting knots in this journal, so that eg -- recursiveSize and GHCI's :sprint can work on it. journalUntieTransactions :: Transaction -> Transaction journalUntieTransactions t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Apply any transaction modifier rules in the journal (adding automated -- postings to transactions, eg). Or if a modifier rule fails to parse, -- return the error message. A reference date is provided to help interpret -- relative dates in transaction modifier queries. -- The first argument selects whether to add visible tags to generated postings & modified transactions. journalModifyTransactions :: Bool -> Day -> Journal -> Either String Journal journalModifyTransactions verbosetags d j = case modifyTransactions (journalAccountType j) (journalInheritedAccountTags j) (journalCommodityStyles j) d verbosetags (jtxnmodifiers j) (jtxns j) of Right ts -> Right j{jtxns=ts} Left err -> Left err -- | Choose and apply a consistent display style to the posting -- amounts in each commodity (see journalCommodityStyles). -- Can return an error message eg if inconsistent number formats are found. 
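-- A rough sketch of chaining some of the helpers above (illustrative only,
-- not the exact post-parse pipeline used by the readers):
--
--   finalise :: Journal -> Either String Journal
--   finalise = journalApplyCommodityStyles . journalNumberAndTieTransactions . journalReverse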
journalApplyCommodityStyles :: Journal -> Either String Journal journalApplyCommodityStyles = fmap fixjournal . journalInferCommodityStyles where fixjournal j@Journal{jpricedirectives=pds} = journalMapPostings (postingApplyCommodityStyles styles) j{jpricedirectives=map fixpricedirective pds} where styles = journalCommodityStyles j fixpricedirective pd@PriceDirective{pdamount=a} = pd{pdamount=styleAmountExceptPrecision styles a} -- | Get the canonical amount styles for this journal, whether (in order of precedence): -- set globally in InputOpts, -- declared by commodity directives, -- declared by a default commodity (D) directive, -- or inferred from posting amounts, -- as a map from symbol to style. -- Styles from directives are assumed to specify the decimal mark. journalCommodityStyles :: Journal -> M.Map CommoditySymbol AmountStyle journalCommodityStyles j = -- XXX could be some redundancy here, cf journalStyleInfluencingAmounts globalstyles <> declaredstyles <> defaultcommoditystyle <> inferredstyles where globalstyles = jglobalcommoditystyles j declaredstyles = M.mapMaybe cformat $ jcommodities j defaultcommoditystyle = M.fromList $ catMaybes [jparsedefaultcommodity j] inferredstyles = jinferredcommodities j -- | Collect and save inferred amount styles for each commodity based on -- the posting amounts in that commodity (excluding price amounts), ie: -- "the format of the first amount, adjusted to the highest precision of all amounts". -- Can return an error message eg if inconsistent number formats are found. journalInferCommodityStyles :: Journal -> Either String Journal journalInferCommodityStyles j = case commodityStylesFromAmounts $ journalStyleInfluencingAmounts j of Left e -> Left e Right cs -> Right j{jinferredcommodities = dbg7 "journalInferCommodityStyles" cs} -- | Given a list of amounts, in parse order (roughly speaking; see journalStyleInfluencingAmounts), -- build a map from their commodity names to standard commodity -- display formats. Can return an error message eg if inconsistent -- number formats are found. -- -- Though, these amounts may have come from multiple files, so we -- shouldn't assume they use consistent number formats. -- Currently we don't enforce that even within a single file, -- and this function never reports an error. -- commodityStylesFromAmounts :: [Amount] -> Either String (M.Map CommoditySymbol AmountStyle) commodityStylesFromAmounts = Right . foldr (\a -> M.insertWith canonicalStyle (acommodity a) (astyle a)) mempty -- | Given a list of amount styles (assumed to be from parsed amounts -- in a single commodity), in parse order, choose a canonical style. canonicalStyleFrom :: [AmountStyle] -> AmountStyle canonicalStyleFrom = foldl' canonicalStyle amountstyle -- TODO: should probably detect and report inconsistencies here. -- Though, we don't have the info for a good error message, so maybe elsewhere. -- | Given a pair of AmountStyles, choose a canonical style. -- This is: -- the general style of the first amount, -- with the first digit group style seen, -- with the maximum precision of all. canonicalStyle :: AmountStyle -> AmountStyle -> AmountStyle canonicalStyle a b = a{asprecision=prec, asdecimalpoint=decmark, asdigitgroups=mgrps} where -- precision is maximum of all precisions prec = max (asprecision a) (asprecision b) -- identify the digit group mark (& group sizes) mgrps = asdigitgroups a <|> asdigitgroups b -- if a digit group mark was identified above, we can rely on that; -- make sure the decimal mark is different. 
If not, default to period. defdecmark = case mgrps of Just (DigitGroups '.' _) -> ',' _ -> '.' -- identify the decimal mark: the first one used, or the above default, -- but never the same character as the digit group mark. -- urgh.. refactor.. decmark = case mgrps of Just _ -> Just defdecmark Nothing -> asdecimalpoint a <|> asdecimalpoint b <|> Just defdecmark -- -- | Apply this journal's historical price records to unpriced amounts where possible. -- journalApplyPriceDirectives :: Journal -> Journal -- journalApplyPriceDirectives j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts} -- where -- fixtransaction t@Transaction{tdate=d, tpostings=ps} = t{tpostings=map fixposting ps} -- where -- fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a} -- fixmixedamount = mapMixedAmount fixamount -- fixamount = fixprice -- fixprice a@Amount{price=Just _} = a -- fixprice a@Amount{commodity=c} = a{price=maybe Nothing (Just . UnitPrice) $ journalPriceDirectiveFor j d c} -- -- | Get the price for a commodity on the specified day from the price database, if known. -- -- Does only one lookup step, ie will not look up the price of a price. -- journalPriceDirectiveFor :: Journal -> Day -> CommoditySymbol -> Maybe MixedAmount -- journalPriceDirectiveFor j d CommoditySymbol{symbol=s} = do -- let ps = reverse $ filter ((<= d).pddate) $ filter ((s==).hsymbol) $ sortBy (comparing pddate) $ jpricedirectives j -- case ps of (PriceDirective{pdamount=a}:_) -> Just a -- _ -> Nothing -- | Infer transaction-implied market prices from commodity-exchanging -- transactions, if any. It's best to call this after transactions have -- been balanced and posting amounts have appropriate prices attached. journalInferMarketPricesFromTransactions :: Journal -> Journal journalInferMarketPricesFromTransactions j = j{jinferredmarketprices = dbg4 "jinferredmarketprices" . map priceDirectiveToMarketPrice . concatMap postingPriceDirectivesFromCost $ journalPostings j } -- | Convert all this journal's amounts to cost using the transaction prices, if any. -- The journal's commodity styles are applied to the resulting amounts. journalToCost :: ConversionOp -> Journal -> Journal journalToCost cost j@Journal{jtxns=ts} = j{jtxns=map (transactionToCost styles cost) ts} where styles = journalCommodityStyles j -- | Add inferred equity postings to a 'Journal' using transaction prices. journalAddInferredEquityPostings :: Bool -> Journal -> Journal journalAddInferredEquityPostings verbosetags j = journalMapTransactions (transactionAddInferredEquityPostings verbosetags equityAcct) j where equityAcct = journalConversionAccount j -- | Add costs inferred from equity conversion postings, where needed and possible. -- See hledger manual > Inferring cost from equity postings. journalInferCostsFromEquity :: Journal -> Either String Journal journalInferCostsFromEquity j = do ts <- mapM (transactionInferCostsFromEquity False $ jaccounttypes j) $ jtxns j return j{jtxns=ts} -- | Do just the internal tagging that is normally done by journalInferCostsFromEquity, -- identifying equity conversion postings and, in particular, postings which have redundant costs. -- Tagging the latter is useful as it allows them to be ignored during transaction balancedness checking. -- And that allows journalInferCostsFromEquity to be postponed till after transaction balancing, -- when it will have more information (amounts) to work with. 
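-- Eg, a rough sketch of how the cost/equity helpers above relate
-- (hypothetical entry):
--
--   2023-01-01
--       assets:euros      €100 @ $1.35
--       assets:dollars   $-135
--
-- journalToCost would value the euro posting at its $135 cost, while
-- journalAddInferredEquityPostings would instead add balancing postings to
-- the equity:conversion account (see journalConversionAccount). When a cost
-- and matching conversion postings are both present, the cost is redundant,
-- which is what journalMarkRedundantCosts (below) tags.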
journalMarkRedundantCosts :: Journal -> Either String Journal journalMarkRedundantCosts j = do ts <- mapM (transactionInferCostsFromEquity True $ jaccounttypes j) $ jtxns j return j{jtxns=ts} -- -- | Get this journal's unique, display-preference-canonicalised commodities, by symbol. -- journalCanonicalCommodities :: Journal -> M.Map String CommoditySymbol -- journalCanonicalCommodities j = canonicaliseCommodities $ journalAmountCommodities j -- -- | Get all this journal's amounts' commodities, in the order parsed. -- journalAmountCommodities :: Journal -> [CommoditySymbol] -- journalAmountCommodities = map acommodity . concatMap amounts . journalAmounts -- -- | Get all this journal's amount and price commodities, in the order parsed. -- journalAmountAndPriceCommodities :: Journal -> [CommoditySymbol] -- journalAmountAndPriceCommodities = concatMap amountCommodities . concatMap amounts . journalAmounts -- -- | Get this amount's commodity and any commodities referenced in its price. -- amountCommodities :: Amount -> [CommoditySymbol] -- amountCommodities Amount{acommodity=c,aprice=p} = -- case p of Nothing -> [c] -- Just (UnitPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- Just (TotalPrice ma) -> c:(concatMap amountCommodities $ amounts ma) -- | Get an ordered list of amounts in this journal which can -- influence canonical amount display styles. Those amounts are, in -- the following order: -- -- * amounts in market price (P) directives (in parse order) -- * posting amounts in transactions (in parse order) -- * the amount in the final default commodity (D) directive -- -- Transaction price amounts (posting amounts' aprice field) are not included. -- journalStyleInfluencingAmounts :: Journal -> [Amount] journalStyleInfluencingAmounts j = dbg7 "journalStyleInfluencingAmounts" $ catMaybes $ concat [ [mdefaultcommodityamt] ,map (Just . pdamount) $ jpricedirectives j ,map Just . concatMap (amountsRaw . pamount) $ journalPostings j ] where -- D's amount style isn't actually stored as an amount, make it into one mdefaultcommodityamt = case jparsedefaultcommodity j of Just (symbol,style) -> Just nullamt{acommodity=symbol,astyle=style} Nothing -> Nothing -- overcomplicated/unused amount traversal stuff -- -- Get an ordered list of 'AmountStyle's from the amounts in this -- journal which influence canonical amount display styles. See -- traverseJournalAmounts. -- journalAmounts :: Journal -> [Amount] -- journalAmounts = getConst . traverseJournalAmounts (Const . (:[])) -- -- Apply a transformation to the journal amounts traversed by traverseJournalAmounts. -- overJournalAmounts :: (Amount -> Amount) -> Journal -> Journal -- overJournalAmounts f = runIdentity . traverseJournalAmounts (Identity . f) -- -- A helper that traverses over most amounts in the journal, -- in particular the ones which influence canonical amount display styles, -- processing them with the given applicative function. -- -- These include, in the following order: -- -- * the amount in the final default commodity (D) directive -- * amounts in market price (P) directives (in parse order) -- * posting amounts in transactions (in parse order) -- -- Transaction price amounts, which may be embedded in posting amounts -- (the aprice field), are left intact but not traversed/processed. -- -- traverseJournalAmounts :: Applicative f => (Amount -> f Amount) -> Journal -> f Journal -- traverseJournalAmounts f j = -- recombine <$> (traverse . dcamt) f (jparsedefaultcommodity j) -- <*> (traverse . 
pdamt) f (jpricedirectives j) -- <*> (traverse . tps . traverse . pamt . amts . traverse) f (jtxns j) -- where -- recombine pds txns = j { jpricedirectives = pds, jtxns = txns } -- -- a bunch of traversals -- dcamt g pd = (\mdc -> case mdc of Nothing -> Nothing -- Just ((c,stpd{pdamount =amt} -- ) <$> g (pdamount pd) -- pdamt g pd = (\amt -> pd{pdamount =amt}) <$> g (pdamount pd) -- tps g t = (\ps -> t {tpostings=ps }) <$> g (tpostings t) -- pamt g p = (\amt -> p {pamount =amt}) <$> g (pamount p) -- amts g (Mixed as) = Mixed <$> g as -- | The fully specified exact date span enclosing the dates (primary or secondary) -- of all this journal's transactions and postings, or DateSpan Nothing Nothing -- if there are none. journalDateSpan :: Bool -> Journal -> DateSpan journalDateSpan False = journalDateSpanHelper $ Just PrimaryDate journalDateSpan True = journalDateSpanHelper $ Just SecondaryDate -- | The fully specified date span enclosing the dates (primary and secondary) -- of all this journal's transactions and postings, or DateSpan Nothing Nothing -- if there are none. journalDateSpanBothDates :: Journal -> DateSpan journalDateSpanBothDates = journalDateSpanHelper Nothing -- | A helper for journalDateSpan which takes Maybe WhichDate directly. Nothing -- uses both primary and secondary dates. journalDateSpanHelper :: Maybe WhichDate -> Journal -> DateSpan journalDateSpanHelper whichdate j = DateSpan (Exact <$> minimumMay dates) (Exact . addDays 1 <$> maximumMay dates) where dates = pdates ++ tdates tdates = concatMap gettdate ts pdates = concatMap getpdate $ concatMap tpostings ts ts = jtxns j gettdate t = case whichdate of Just PrimaryDate -> [tdate t] Just SecondaryDate -> [fromMaybe (tdate t) $ tdate2 t] Nothing -> tdate t : maybeToList (tdate2 t) getpdate p = case whichdate of Just PrimaryDate -> maybeToList $ pdate p Just SecondaryDate -> maybeToList $ pdate2 p <|> pdate p Nothing -> catMaybes [pdate p, pdate2 p] -- | The earliest of this journal's transaction and posting dates, or -- Nothing if there are none. journalStartDate :: Bool -> Journal -> Maybe Day journalStartDate secondary j = fromEFDay <$> b where DateSpan b _ = journalDateSpan secondary j -- | The "exclusive end date" of this journal: the day following its latest transaction -- or posting date, or Nothing if there are none. journalEndDate :: Bool -> Journal -> Maybe Day journalEndDate secondary j = fromEFDay <$> e where DateSpan _ e = journalDateSpan secondary j -- | The latest of this journal's transaction and posting dates, or -- Nothing if there are none. journalLastDay :: Bool -> Journal -> Maybe Day journalLastDay secondary j = addDays (-1) <$> journalEndDate secondary j -- | Apply the pivot transformation to all postings in a journal, -- replacing their account name by their value for the given field or tag. journalPivot :: Text -> Journal -> Journal journalPivot fieldortagname j = j{jtxns = map (transactionPivot fieldortagname) . jtxns $ j} -- | Replace this transaction's postings' account names with the value -- of the given field or tag, if any. transactionPivot :: Text -> Transaction -> Transaction transactionPivot fieldortagname t = t{tpostings = map (postingPivot fieldortagname) . tpostings $ t} -- | Replace this posting's account name with the value -- of the given field or tag, if any, otherwise the empty string. 
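-- A small sketch of pivoting (hypothetical entry with a member: tag):
--
--   2023-01-01 gift
--       expenses:gifts   $10   ; member:alice
--       assets:cash
--
-- journalPivot "member" j would rename the first posting's account to
-- "alice" and the second's to "" (it has no member: tag), keeping the
-- original account names in poriginal.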
postingPivot :: Text -> Posting -> Posting postingPivot fieldortagname p = p{paccount = pivotedacct, poriginal = Just $ originalPosting p} where pivotedacct | Just t <- ptransaction p, fieldortagname == "code" = tcode t | Just t <- ptransaction p, fieldortagname == "description" = tdescription t | Just t <- ptransaction p, fieldortagname == "payee" = transactionPayee t | Just t <- ptransaction p, fieldortagname == "note" = transactionNote t | Just t <- ptransaction p, fieldortagname == "status" = T.pack . show . tstatus $ t | Just (_, value) <- postingFindTag fieldortagname p = value | otherwise = "" postingFindTag :: TagName -> Posting -> Maybe (TagName, TagValue) postingFindTag tagname p = find ((tagname==) . fst) $ postingAllTags p -- | Apply some account aliases to all posting account names in the journal, as described by accountNameApplyAliases. -- This can fail due to a bad replacement pattern in a regular expression alias. journalApplyAliases :: [AccountAlias] -> Journal -> Either RegexError Journal -- short circuit the common case, just in case there's a performance impact from txnTieKnot etc. journalApplyAliases [] j = Right j journalApplyAliases aliases j = case mapM (transactionApplyAliases aliases) $ jtxns j of Right ts -> Right j{jtxns = ts} Left err -> Left err -- -- | Build a database of market prices in effect on the given date, -- -- from the journal's price directives. -- journalPrices :: Day -> Journal -> Prices -- journalPrices d = toPrices d . jpricedirectives -- -- | Render a market price as a P directive. -- showPriceDirectiveDirective :: PriceDirective -> String -- showPriceDirectiveDirective pd = unwords -- [ "P" -- , showDate (pddate pd) -- , T.unpack (pdcommodity pd) -- , (showAmount . amountSetPrecision maxprecision) (pdamount pd -- ) -- ] -- debug helpers -- traceAmountPrecision a = trace (show $ map (precision . acommodity) $ amounts a) a -- tracePostingsCommodities ps = trace (show $ map ((map (precision . acommodity) . amounts) . pamount) ps) ps -- tests -- -- A sample journal for testing, similar to examples/sample.journal. -- Provide an option to either use explicit amounts or missing amounts, for testing purposes. 
-- -- 2008/01/01 income -- assets:bank:checking $1 -- income:salary -- -- 2008/06/01 gift -- assets:bank:checking $1 -- income:gifts -- -- 2008/06/02 save -- assets:bank:saving $1 -- assets:bank:checking -- -- 2008/06/03 * eat & shop -- expenses:food $1 -- expenses:supplies $1 -- assets:cash -- -- 2008/10/01 take a loan -- assets:bank:checking $1 -- liabilities:debts $-1 -- -- 2008/12/31 * pay off -- liabilities:debts $1 -- assets:bank:checking samplejournal = samplejournalMaybeExplicit True samplejournalMaybeExplicit :: Bool -> Journal samplejournalMaybeExplicit explicit = nulljournal {jtxns = [ txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=fromGregorian 2008 01 01, tdate2=Nothing, tstatus=Unmarked, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:salary" `post` if explicit then usd (-1) else missingamt ], tprecedingcomment="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=fromGregorian 2008 06 01, tdate2=Nothing, tstatus=Unmarked, tcode="", tdescription="gift", tcomment="", ttags=[], tpostings= ["assets:bank:checking" `post` usd 1 ,"income:gifts" `post` if explicit then usd (-1) else missingamt ], tprecedingcomment="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=fromGregorian 2008 06 02, tdate2=Nothing, tstatus=Unmarked, tcode="", tdescription="save", tcomment="", ttags=[], tpostings= ["assets:bank:saving" `post` usd 1 ,"assets:bank:checking" `post` if explicit then usd (-1) else missingamt ], tprecedingcomment="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=fromGregorian 2008 06 03, tdate2=Nothing, tstatus=Cleared, tcode="", tdescription="eat & shop", tcomment="", ttags=[], tpostings=["expenses:food" `post` usd 1 ,"expenses:supplies" `post` usd 1 ,"assets:cash" `post` if explicit then usd (-2) else missingamt ], tprecedingcomment="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=fromGregorian 2008 10 01, tdate2=Nothing, tstatus=Unmarked, tcode="", tdescription="take a loan", tcomment="", ttags=[], tpostings=["assets:bank:checking" `post` usd 1 ,"liabilities:debts" `post` usd (-1) ], tprecedingcomment="" } , txnTieKnot $ Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=fromGregorian 2008 12 31, tdate2=Nothing, tstatus=Unmarked, tcode="", tdescription="pay off", tcomment="", ttags=[], tpostings=["liabilities:debts" `post` usd 1 ,"assets:bank:checking" `post` if explicit then usd (-1) else missingamt ], tprecedingcomment="" } ] } tests_Journal = testGroup "Journal" [ testCase "journalDateSpan" $ journalDateSpan True nulljournal{ jtxns = [nulltransaction{tdate = fromGregorian 2014 02 01 ,tpostings = [posting{pdate=Just (fromGregorian 2014 01 10)}] } ,nulltransaction{tdate = fromGregorian 2014 09 01 ,tpostings = [posting{pdate2=Just (fromGregorian 2014 10 10)}] } ] } @?= (DateSpan (Just $ Exact $ fromGregorian 2014 1 10) (Just $ Exact $ fromGregorian 2014 10 11)) ] hledger-lib-1.30/Hledger/Data/JournalChecks.hs0000644000000000000000000003206514434445206017327 0ustar0000000000000000{-| Various additional validation checks that can be performed on a Journal. Some are called as part of reading a file in strict mode, others can be called only via the check command. 
-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RecordWildCards #-} module Hledger.Data.JournalChecks ( journalCheckAccounts, journalCheckCommodities, journalCheckPayees, journalCheckPairedConversionPostings, journalCheckRecentAssertions, journalCheckTags, module Hledger.Data.JournalChecks.Ordereddates, module Hledger.Data.JournalChecks.Uniqueleafnames, ) where import Data.Char (isSpace) import Data.List.Extra import Data.Maybe import qualified Data.Map.Strict as M import qualified Data.Text as T import Safe (atMay, lastMay) import Text.Printf (printf) import Hledger.Data.Errors import Hledger.Data.Journal import Hledger.Data.JournalChecks.Ordereddates import Hledger.Data.JournalChecks.Uniqueleafnames import Hledger.Data.Posting (isVirtual, postingDate, postingStatus, transactionAllTags) import Hledger.Data.Types import Hledger.Data.Amount (amountIsZero, amountsRaw, missingamt) import Hledger.Data.Transaction (transactionPayee, showTransactionLineFirstPart, partitionAndCheckConversionPostings) import Data.Time (Day, diffDays) import Hledger.Utils -- | Check that all the journal's postings are to accounts with -- account directives, returning an error message otherwise. journalCheckAccounts :: Journal -> Either String () journalCheckAccounts j = mapM_ checkacct (journalPostings j) where checkacct p@Posting{paccount=a} | a `elem` journalAccountNamesDeclared j = Right () | otherwise = Left $ printf (unlines [ "%s:%d:" ,"%s" ,"Strict account checking is enabled, and" ,"account %s has not been declared." ,"Consider adding an account directive. Examples:" ,"" ,"account %s" ,"account %s ; type:A ; (L,E,R,X,C,V)" ]) f l ex (show a) a a where (f,l,_mcols,ex) = makePostingAccountErrorExcerpt p -- | Check that all the commodities used in this journal's postings have been declared -- by commodity directives, returning an error message otherwise. journalCheckCommodities :: Journal -> Either String () journalCheckCommodities j = mapM_ checkcommodities (journalPostings j) where checkcommodities p = case findundeclaredcomm p of Nothing -> Right () Just (comm, _) -> Left $ printf (unlines [ "%s:%d:" ,"%s" ,"Strict commodity checking is enabled, and" ,"commodity %s has not been declared." ,"Consider adding a commodity directive. Examples:" ,"" ,"commodity %s1000.00" ,"commodity 1.000,00 %s" ]) f l ex (show comm) comm comm where (f,l,_mcols,ex) = makePostingErrorExcerpt p finderrcols where -- Find the first undeclared commodity symbol in this posting's amount -- or balance assertion amount, if any. The boolean will be true if -- the undeclared symbol was in the posting amount. findundeclaredcomm :: Posting -> Maybe (CommoditySymbol, Bool) findundeclaredcomm Posting{pamount=amt,pbalanceassertion} = case (findundeclared postingcomms, findundeclared assertioncomms) of (Just c, _) -> Just (c, True) (_, Just c) -> Just (c, False) _ -> Nothing where postingcomms = map acommodity $ filter (not . isIgnorable) $ amountsRaw amt where -- Ignore missing amounts and zero amounts without commodity (#1767) isIgnorable a = (T.null (acommodity a) && amountIsZero a) || a == missingamt assertioncomms = [acommodity a | Just a <- [baamount <$> pbalanceassertion]] findundeclared = find (`M.notMember` jcommodities j) -- Calculate columns suitable for highlighting the excerpt. -- We won't show these in the main error line as they aren't -- accurate for the actual data. -- Find the best position for an error column marker when this posting -- is rendered by showTransaction. 
-- Reliably locating a problem commodity symbol in showTransaction output -- is really tricky. Some examples: -- -- assets "C $" -1 @ $ 2 -- ^ -- assets $1 = $$1 -- ^ -- assets [ANSI RED]$-1[ANSI RESET] -- ^ -- -- To simplify, we will mark the whole amount + balance assertion region, like: -- assets "C $" -1 @ $ 2 -- ^^^^^^^^^^^^^^ -- XXX refine this region when it's easy finderrcols p' t txntxt = case transactionFindPostingIndex (==p') t of Nothing -> Nothing Just pindex -> Just (amtstart, Just amtend) where tcommentlines = max 0 (length (T.lines $ tcomment t) - 1) errrelline = 1 + tcommentlines + pindex -- XXX doesn't count posting coment lines errline = fromMaybe "" (T.lines txntxt `atMay` (errrelline-1)) acctend = 4 + T.length (paccount p') + if isVirtual p' then 2 else 0 amtstart = acctend + (T.length $ T.takeWhile isSpace $ T.drop acctend errline) + 1 amtend = amtstart + (T.length $ T.stripEnd $ T.takeWhile (/=';') $ T.drop amtstart errline) -- | Check that all the journal's transactions have payees declared with -- payee directives, returning an error message otherwise. journalCheckPayees :: Journal -> Either String () journalCheckPayees j = mapM_ checkpayee (jtxns j) where checkpayee t | payee `elem` journalPayeesDeclared j = Right () | otherwise = Left $ printf (unlines [ "%s:%d:" ,"%s" ,"Strict payee checking is enabled, and" ,"payee %s has not been declared." ,"Consider adding a payee directive. Examples:" ,"" ,"payee %s" ]) f l ex (show payee) payee where payee = transactionPayee t (f,l,_mcols,ex) = makeTransactionErrorExcerpt t finderrcols -- Calculate columns suitable for highlighting the excerpt. -- We won't show these in the main error line as they aren't -- accurate for the actual data. finderrcols t' = Just (col, Just col2) where col = T.length (showTransactionLineFirstPart t') + 2 col2 = col + T.length (transactionPayee t') - 1 -- | Check that all the journal's tags (on accounts, transactions, postings..) -- have been declared with tag directives, returning an error message otherwise. journalCheckTags :: Journal -> Either String () journalCheckTags j = do mapM_ checkaccttags $ jdeclaredaccounts j mapM_ checktxntags $ jtxns j where checkaccttags (a, adi) = mapM_ (checkaccttag.fst) $ aditags adi where checkaccttag tagname | tagname `elem` declaredtags = Right () | otherwise = Left $ printf msg f l ex (show tagname) tagname where (f,l,_mcols,ex) = makeAccountTagErrorExcerpt (a, adi) tagname checktxntags txn = mapM_ (checktxntag . fst) $ transactionAllTags txn where checktxntag tagname | tagname `elem` declaredtags = Right () | otherwise = Left $ printf msg f l ex (show tagname) tagname where (f,l,_mcols,ex) = makeTransactionErrorExcerpt txn finderrcols where finderrcols _txn' = Nothing -- don't bother for now -- Just (col, Just col2) -- where -- col = T.length (showTransactionLineFirstPart txn') + 2 -- col2 = col + T.length tagname - 1 declaredtags = journalTagsDeclared j msg = (unlines [ "%s:%d:" ,"%s" ,"Strict tag checking is enabled, and" ,"tag %s has not been declared." ,"Consider adding a tag directive. Examples:" ,"" ,"tag %s" ]) -- | In each tranaction, check that any conversion postings occur in adjacent pairs. 
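-- These checks all return Either String (), so they can be sequenced; eg a
-- rough sketch (hypothetical helper, not the exact set used by strict mode):
--
--   checkAll :: Journal -> Either String ()
--   checkAll j = do
--     journalCheckAccounts j
--     journalCheckCommodities j
--     journalCheckPayees j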
journalCheckPairedConversionPostings :: Journal -> Either String () journalCheckPairedConversionPostings j = mapM_ (transactionCheckPairedConversionPostings (jaccounttypes j)) $ jtxns j transactionCheckPairedConversionPostings :: M.Map AccountName AccountType -> Transaction -> Either String () transactionCheckPairedConversionPostings accttypes t = case partitionAndCheckConversionPostings True accttypes (zip [0..] $ tpostings t) of Left err -> Left $ T.unpack err Right _ -> Right () ---------- -- | Information useful for checking the age and lag of an account's latest balance assertion. data BalanceAssertionInfo = BAI { baiAccount :: AccountName -- ^ the account , baiLatestAssertionPosting :: Posting -- ^ the account's latest posting with a balance assertion , baiLatestAssertionDate :: Day -- ^ the posting date , baiLatestAssertionStatus :: Status -- ^ the posting status , baiLatestPostingDate :: Day -- ^ the date of this account's latest posting with or without a balance assertion } -- | Given a list of postings to the same account, -- if any of them contain a balance assertion, -- calculate the last asserted and posted dates. balanceAssertionInfo :: [Posting] -> Maybe BalanceAssertionInfo balanceAssertionInfo ps = case (mlatestp, mlatestassertp) of (Just latestp, Just latestassertp) -> Just $ BAI{baiAccount = paccount latestassertp ,baiLatestAssertionDate = postingDate latestassertp ,baiLatestAssertionPosting = latestassertp ,baiLatestAssertionStatus = postingStatus latestassertp ,baiLatestPostingDate = postingDate latestp } _ -> Nothing where ps' = sortOn postingDate ps mlatestp = lastMay ps' mlatestassertp = lastMay [p | p@Posting{pbalanceassertion=Just _} <- ps'] -- | The number of days allowed between an account's latest balance assertion -- and latest posting. maxlag = 7 -- | The number of days between this balance assertion and the latest posting in its account. baiLag BAI{..} = diffDays baiLatestPostingDate baiLatestAssertionDate -- -- | The earliest balance assertion date which would satisfy the recentassertions check. -- baiLagOkDate :: BalanceAssertionInfo -> Day -- baiLagOkDate BAI{..} = addDays (-7) baiLatestPostingDate -- | Check that this latest assertion is close enough to the account's latest posting. checkRecentAssertion :: BalanceAssertionInfo -> Either (BalanceAssertionInfo, String) () checkRecentAssertion bai@BAI{..} | lag > maxlag = Left (bai, printf (chomp $ unlines [ "the last balance assertion (%s) was %d days before" ,"the latest posting (%s)." ]) (show baiLatestAssertionDate) lag (show baiLatestPostingDate) ) | otherwise = Right () where lag = baiLag bai -- | Check that all the journal's accounts with balance assertions have -- an assertion no more than 7 days before their latest posting. -- Today's date is provided for error messages. journalCheckRecentAssertions :: Day -> Journal -> Either String () journalCheckRecentAssertions today j = let acctps = groupOn paccount $ sortOn paccount $ journalPostings j acctassertioninfos = mapMaybe balanceAssertionInfo acctps in case mapM_ checkRecentAssertion acctassertioninfos of Right () -> Right () Left (BAI{..}, msg) -> Left errmsg where errmsg = chomp $ printf (unlines [ "%s:", "%s\n", "The recentassertions check is enabled, so accounts with balance assertions must", "have a balance assertion no more than %d days before their latest posting date.", "In account %s,", "%s", "", "%s" ]) (maybe "(no position)" -- shouldn't happen (sourcePosPretty . 
baposition) $ pbalanceassertion baiLatestAssertionPosting) (textChomp excerpt) maxlag baiAccount msg recommendation where (_,_,_,excerpt) = makeBalanceAssertionErrorExcerpt baiLatestAssertionPosting recommendation = unlines [ "Consider adding a more recent balance assertion for this account. Eg:", "", printf "%s *\n %s $0 = $0 ; <- adjust" (show today) baiAccount ] -- -- | Print the last balance assertion date & status of all accounts with balance assertions. -- printAccountLastAssertions :: Day -> [BalanceAssertionInfo] -> IO () -- printAccountLastAssertions today acctassertioninfos = do -- forM_ acctassertioninfos $ \BAI{..} -> do -- putStr $ printf "%-30s %s %s, %d days ago\n" -- baiAccount -- (if baiLatestClearedAssertionStatus==Unmarked then " " else show baiLatestClearedAssertionStatus) -- (show baiLatestClearedAssertionDate) -- (diffDays today baiLatestClearedAssertionDate) hledger-lib-1.30/Hledger/Data/JournalChecks/Ordereddates.hs0000755000000000000000000000502614434445206021734 0ustar0000000000000000module Hledger.Data.JournalChecks.Ordereddates ( journalCheckOrdereddates ) where import Control.Monad (forM) import Data.List (groupBy) import Text.Printf (printf) import qualified Data.Text as T (pack, unlines) import Hledger.Data.Errors (makeTransactionErrorExcerpt) import Hledger.Data.Transaction (transactionFile, transactionDateOrDate2) import Hledger.Data.Types import Hledger.Utils (textChomp) journalCheckOrdereddates :: WhichDate -> Journal -> Either String () journalCheckOrdereddates whichdate j = do let -- we check date ordering within each file, not across files -- note, relying on txns always being sorted by file here txnsbyfile = groupBy (\t1 t2 -> transactionFile t1 == transactionFile t2) $ jtxns j getdate = transactionDateOrDate2 whichdate compare' a b = getdate a <= getdate b (const $ Right ()) =<< (forM txnsbyfile $ \ts -> case checkTransactions compare' ts of FoldAcc{fa_previous=Nothing} -> Right () FoldAcc{fa_error=Nothing} -> Right () FoldAcc{fa_error=Just t, fa_previous=Just tprev} -> Left $ printf ("%s:%d:\n%s\nOrdered dates checking is enabled, and this transaction's\n" ++ "date%s (%s) is out of order with the previous transaction.\n" ++ "Consider moving this entry into date order, or adjusting its date.") f l ex datenum (show $ getdate t) where (_,_,_,ex1) = makeTransactionErrorExcerpt tprev (const Nothing) (f,l,_,ex2) = makeTransactionErrorExcerpt t finderrcols -- separate the two excerpts by a space-beginning line to help flycheck-hledger parse them ex = T.unlines [textChomp ex1, T.pack " ", textChomp ex2] finderrcols _t = Just (1, Just 10) datenum = if whichdate==SecondaryDate then "2" else "") data FoldAcc a b = FoldAcc { fa_error :: Maybe a , fa_previous :: Maybe b } checkTransactions :: (Transaction -> Transaction -> Bool) -> [Transaction] -> FoldAcc Transaction Transaction checkTransactions compare' = foldWhile f FoldAcc{fa_error=Nothing, fa_previous=Nothing} where f current acc@FoldAcc{fa_previous=Nothing} = acc{fa_previous=Just current} f current acc@FoldAcc{fa_previous=Just previous} = if compare' previous current then acc{fa_previous=Just current} else acc{fa_error=Just current} foldWhile :: (a -> FoldAcc a b -> FoldAcc a b) -> FoldAcc a b -> [a] -> FoldAcc a b foldWhile _ acc [] = acc foldWhile fold acc (a:as) = case fold a acc of acc'@FoldAcc{fa_error=Just _} -> acc' acc' -> foldWhile fold acc' as hledger-lib-1.30/Hledger/Data/JournalChecks/Uniqueleafnames.hs0000755000000000000000000000560714434445206022456 0ustar0000000000000000{-# LANGUAGE 
OverloadedStrings #-} module Hledger.Data.JournalChecks.Uniqueleafnames ( journalCheckUniqueleafnames ) where import Data.Function (on) import Data.List (groupBy, sortBy) import Data.Text (Text) import qualified Data.Text as T import Text.Printf (printf) import Hledger.Data.AccountName (accountLeafName) import Hledger.Data.Errors (makePostingErrorExcerpt) import Hledger.Data.Journal (journalPostings, journalAccountNamesUsed) import Hledger.Data.Posting (isVirtual) import Hledger.Data.Types import Hledger.Utils (chomp, textChomp) -- | Check that all the journal's postings are to accounts with a unique leaf name. -- Otherwise, return an error message for the first offending posting. journalCheckUniqueleafnames :: Journal -> Either String () journalCheckUniqueleafnames j = do -- find all duplicate leafnames, and the full account names they appear in case finddupes $ journalLeafAndFullAccountNames j of [] -> Right () -- pick the first duplicated leafname and show the transactions of -- the first two postings using it, highlighting the second as the error. (leaf,fulls):_ -> case filter ((`elem` fulls).paccount) $ journalPostings j of ps@(p:p2:_) -> Left $ chomp $ printf ("%s:%d:\n%s\nChecking for unique account leaf names is enabled, and\n" ++"account leaf name %s is not unique.\n" ++"It appears in these account names, which are used in %d places:\n%s" ++"\nConsider changing these account names so their last parts are different." ) f l ex (show leaf) (length ps) accts where -- t = fromMaybe nulltransaction ptransaction -- XXX sloppy (_,_,_,ex1) = makePostingErrorExcerpt p (\_ _ _ -> Nothing) (f,l,_,ex2) = makePostingErrorExcerpt p2 finderrcols -- separate the two excerpts by a space-beginning line to help flycheck-hledger parse them ex = T.unlines [textChomp ex1, T.pack " ...", textChomp ex2] finderrcols p' _ _ = Just (col, Just col2) where a = paccount p' alen = T.length a llen = T.length $ accountLeafName a col = 5 + (if isVirtual p' then 1 else 0) + alen - llen col2 = col + llen - 1 accts = T.unlines fulls _ -> Right () -- shouldn't happen finddupes :: (Ord leaf, Eq full) => [(leaf, full)] -> [(leaf, [full])] finddupes leafandfullnames = zip dupLeafs dupAccountNames where dupLeafs = map (fst . head) d dupAccountNames = map (map snd) d d = dupes' leafandfullnames dupes' = filter ((> 1) . length) . groupBy ((==) `on` fst) . sortBy (compare `on` fst) journalLeafAndFullAccountNames :: Journal -> [(Text, AccountName)] journalLeafAndFullAccountNames = map leafAndAccountName . journalAccountNamesUsed where leafAndAccountName a = (accountLeafName a, a) hledger-lib-1.30/Hledger/Data/Json.hs0000644000000000000000000002444514434445206015510 0ustar0000000000000000{- JSON instances. Should they be in Types.hs ? 
-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} module Hledger.Data.Json ( -- * Instances -- * Utilities toJsonText ,writeJsonFile ,readJsonFile ) where import Data.Aeson import Data.Aeson.Encode.Pretty (Config(..), Indent(..), NumberFormat(..), encodePretty', encodePrettyToTextBuilder') --import Data.Aeson.TH import qualified Data.ByteString.Lazy as BL import Data.Decimal (DecimalRaw(..), roundTo) import Data.Maybe (fromMaybe) import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Text.Megaparsec (Pos, SourcePos, mkPos, unPos) import Hledger.Data.Types import Hledger.Data.Amount (amountsRaw, mixed) -- To JSON instance ToJSON Status instance ToJSON SourcePos -- Use the same encoding as the underlying Int instance ToJSON Pos where toJSON = toJSON . unPos toEncoding = toEncoding . unPos -- https://github.com/simonmichael/hledger/issues/1195 -- The default JSON output for Decimal can contain 255-digit integers -- (for repeating decimals caused by implicit transaction prices). -- JSON output is intended to be consumed by diverse apps and -- programming languages, which can't handle numbers like that. -- From #1195: -- -- > - JavaScript uses 64-bit IEEE754 numbers which can only accurately -- > represent integers up to 9007199254740991 (i.e. a maximum of 15 digits). -- > - Java’s largest integers are limited to 18 digits. -- > - Python 3 integers are unbounded. -- > - Python 2 integers are limited to 18 digits like Java. -- > - C and C++ number limits depend on platform — most platforms should -- > be able to represent unsigned integers up to 64 bits, i.e. 19 digits. -- -- What is the best compromise for both accuracy and practicality ? -- For now, we provide both the maximum precision representation -- (decimalPlaces & decimalMantissa), and a floating point representation -- with up to 10 decimal places (and an unbounded number of integer digits). -- We hope the mere presence of the large number in JSON won't break things, -- and that the overall number of significant digits in the floating point -- remains manageable in practice. (I'm not sure how to limit the number -- of significant digits in a Decimal right now.) instance (Integral a, ToJSON a) => ToJSON (DecimalRaw a) where toJSON = object . decimalKV toEncoding = pairs . mconcat . decimalKV decimalKV :: (KeyValue kv, Integral a, ToJSON a) => DecimalRaw a -> [kv] decimalKV d = let d' = if decimalPlaces d <= 10 then d else roundTo 10 d in [ "decimalPlaces" .= decimalPlaces d' , "decimalMantissa" .= decimalMantissa d' , "floatingPoint" .= (realToFrac d' :: Double) ] instance ToJSON Amount instance ToJSON AmountStyle -- Use the same JSON serialisation as Maybe Word8 instance ToJSON AmountPrecision where toJSON = toJSON . \case Precision n -> Just n NaturalPrecision -> Nothing toEncoding = toEncoding . \case Precision n -> Just n NaturalPrecision -> Nothing instance ToJSON Side instance ToJSON DigitGroupStyle instance ToJSON MixedAmount where toJSON = toJSON . amountsRaw toEncoding = toEncoding . amountsRaw instance ToJSON BalanceAssertion instance ToJSON AmountPrice instance ToJSON MarketPrice instance ToJSON PostingType instance ToJSON Posting where toJSON = object . postingKV toEncoding = pairs . mconcat . 
postingKV postingKV :: KeyValue kv => Posting -> [kv] postingKV Posting{..} = [ "pdate" .= pdate , "pdate2" .= pdate2 , "pstatus" .= pstatus , "paccount" .= paccount , "pamount" .= pamount , "pcomment" .= pcomment , "ptype" .= ptype , "ptags" .= ptags , "pbalanceassertion" .= pbalanceassertion -- To avoid a cycle, show just the parent transaction's index number -- in a dummy field. When re-parsed, there will be no parent. , "ptransaction_" .= maybe "" (show.tindex) ptransaction -- This is probably not wanted in json, we discard it. , "poriginal" .= (Nothing :: Maybe Posting) ] instance ToJSON Transaction instance ToJSON TransactionModifier instance ToJSON TMPostingRule instance ToJSON PeriodicTransaction instance ToJSON PriceDirective instance ToJSON EFDay instance ToJSON DateSpan instance ToJSON Interval instance ToJSON Period instance ToJSON AccountAlias instance ToJSON AccountType instance ToJSONKey AccountType instance ToJSON AccountDeclarationInfo instance ToJSON PayeeDeclarationInfo instance ToJSON TagDeclarationInfo instance ToJSON Commodity instance ToJSON TimeclockCode instance ToJSON TimeclockEntry instance ToJSON Journal instance ToJSON Account where toJSON = object . accountKV toEncoding = pairs . mconcat . accountKV accountKV :: KeyValue kv => Account -> [kv] accountKV a = [ "aname" .= aname a , "aebalance" .= aebalance a , "aibalance" .= aibalance a , "anumpostings" .= anumpostings a , "aboring" .= aboring a -- To avoid a cycle, show just the parent account's name -- in a dummy field. When re-parsed, there will be no parent. , "aparent_" .= maybe "" aname (aparent a) -- Just the names of subaccounts, as a dummy field, ignored when parsed. , "asubs_" .= map aname (asubs a) -- The actual subaccounts (and their subs..), making a (probably highly redundant) tree -- ,"asubs" .= asubs a -- Omit the actual subaccounts , "asubs" .= ([]::[Account]) ] instance ToJSON Ledger -- From JSON instance FromJSON Status instance FromJSON SourcePos -- Use the same encoding as the underlying Int instance FromJSON Pos where parseJSON = fmap mkPos . parseJSON instance FromJSON Amount instance FromJSON AmountStyle -- Use the same JSON serialisation as Maybe Word8 instance FromJSON AmountPrecision where parseJSON = fmap (maybe NaturalPrecision Precision) . parseJSON instance FromJSON Side instance FromJSON DigitGroupStyle instance FromJSON MixedAmount where parseJSON = fmap (mixed :: [Amount] -> MixedAmount) . parseJSON instance FromJSON BalanceAssertion instance FromJSON AmountPrice instance FromJSON MarketPrice instance FromJSON PostingType instance FromJSON Posting instance FromJSON Transaction instance FromJSON AccountDeclarationInfo -- XXX The ToJSON instance replaces subaccounts with just names. -- Here we should try to make use of those to reconstruct the -- parent-child relationships. instance FromJSON Account -- Decimal, various attempts -- -- https://stackoverflow.com/questions/40331851/haskell-data-decimal-as-aeson-type ----instance FromJSON Decimal where parseJSON = ---- A.withScientific "Decimal" (return . right . eitherFromRational . 
toRational) -- -- https://github.com/bos/aeson/issues/474 -- http://hackage.haskell.org/package/aeson-1.4.2.0/docs/Data-Aeson-TH.html -- $(deriveFromJSON defaultOptions ''Decimal) -- doesn't work -- $(deriveFromJSON defaultOptions ''DecimalRaw) -- works; requires TH, but gives better parse error messages -- -- https://github.com/PaulJohnson/Haskell-Decimal/issues/6 instance FromJSON (DecimalRaw Integer) -- -- @simonmichael, I think the code in your first comment should work if it compiles—though “work” doesn’t mean you can parse a JSON number directly into a `Decimal` using the generic instance, as you’ve discovered. -- --Error messages with these extensions are always rather cryptic, but I’d prefer them to Template Haskell. Typically you’ll want to start by getting a generic `ToJSON` instance working, then use that to figure out what the `FromJSON` instance expects to parse: for a correct instance, `encode` and `decode` should give you an isomorphism between your type and a subset of `Bytestring` (up to the `Maybe` wrapper that `decode` returns). -- --I don’t have time to test it right now, but I think it will also work without `DeriveAnyClass`, just using `DeriveGeneric` and `StandAloneDeriving`. It should also work to use the [`genericParseJSON`](http://hackage.haskell.org/package/aeson/docs/Data-Aeson.html#v:genericParseJSON) function to implement the class explicitly, something like this: -- --{-# LANGUAGE DeriveGeneric #-} --{-# LANGUAGE StandAloneDeriving #-} --import GHC.Generics --import Data.Aeson --deriving instance Generic Decimal --instance FromJSON Decimal where -- parseJSON = genericParseJSON defaultOptions -- --And of course you can avoid `StandAloneDeriving` entirely if you’re willing to wrap `Decimal` in your own `newtype`. -- XXX these will allow reading a Journal, but currently the -- jdeclaredaccounttypes Map gets serialised as a JSON list, which -- can't be read back. -- -- instance FromJSON AccountAlias -- instance FromJSONKey AccountType where fromJSONKey = genericFromJSONKey defaultJSONKeyOptions -- instance FromJSON AccountType -- instance FromJSON ClockTime -- instance FromJSON Commodity -- instance FromJSON DateSpan -- instance FromJSON Interval -- instance FromJSON Period -- instance FromJSON PeriodicTransaction -- instance FromJSON PriceDirective -- instance FromJSON TimeclockCode -- instance FromJSON TimeclockEntry -- instance FromJSON TransactionModifier -- instance FromJSON Journal -- Utilities -- | Config for pretty printing JSON output. jsonConf :: Config jsonConf = Config{confIndent=Spaces 2, confCompare=compare, confNumFormat=Generic, confTrailingNewline=True} -- | Show a JSON-convertible haskell value as pretty-printed JSON text. toJsonText :: ToJSON a => a -> TL.Text toJsonText = TB.toLazyText . encodePrettyToTextBuilder' jsonConf -- | Write a JSON-convertible haskell value to a pretty-printed JSON file. -- Eg: writeJsonFile "a.json" nulltransaction writeJsonFile :: ToJSON a => FilePath -> a -> IO () writeJsonFile f = BL.writeFile f . encodePretty' jsonConf -- | Read a JSON file and decode it to the target type, or raise an error if we can't. 
-- Eg: readJsonFile "a.json" :: IO Transaction readJsonFile :: FromJSON a => FilePath -> IO a readJsonFile f = do bl <- BL.readFile f -- PARTIAL: let v = fromMaybe (error $ "could not decode JSON in "++show f++" to target value") (decode bl :: Maybe Value) case fromJSON v :: FromJSON a => Result a of Error e -> error e Success t -> return t hledger-lib-1.30/Hledger/Data/Ledger.hs0000644000000000000000000000727714434445206016005 0ustar0000000000000000{-| A 'Ledger' is derived from a 'Journal' by applying a filter specification to select 'Transaction's and 'Posting's of interest. It contains the filtered journal and knows the resulting chart of accounts, account balances, and postings in each account. -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Ledger ( nullledger ,ledgerFromJournal ,ledgerAccountNames ,ledgerAccount ,ledgerRootAccount ,ledgerTopAccounts ,ledgerLeafAccounts ,ledgerPostings ,ledgerDateSpan ,ledgerCommodities ,tests_Ledger ) where import qualified Data.Map as M import Safe (headDef) import Text.Printf import Test.Tasty (testGroup) import Test.Tasty.HUnit ((@?=), testCase) import Hledger.Data.Types import Hledger.Data.Account import Hledger.Data.Journal import Hledger.Query instance Show Ledger where show l = printf "Ledger with %d transactions, %d accounts\n" --"%s" (length (jtxns $ ljournal l) + length (jtxnmodifiers $ ljournal l) + length (jperiodictxns $ ljournal l)) (length $ ledgerAccountNames l) -- (showtree $ ledgerAccountNameTree l) nullledger :: Ledger nullledger = Ledger { ljournal = nulljournal, laccounts = [] } -- | Filter a journal's transactions with the given query, then build -- a "Ledger", containing the journal plus the tree of all its -- accounts with their subaccount-inclusive and subaccount-exclusive -- balances. If the query includes a depth limit, the ledger's journal -- will be depth limited, but the ledger's account tree will not. ledgerFromJournal :: Query -> Journal -> Ledger ledgerFromJournal q j = nullledger{ljournal=j'', laccounts=as} where (q',depthq) = (filterQuery (not . queryIsDepth) q, filterQuery queryIsDepth q) j' = filterJournalAmounts (filterQuery queryIsSym q) $ -- remove amount parts which the query's sym: terms would exclude filterJournalPostings q' j as = accountsFromPostings $ journalPostings j' j'' = filterJournalPostings depthq j' -- | List a ledger's account names. ledgerAccountNames :: Ledger -> [AccountName] ledgerAccountNames = drop 1 . map aname . laccounts -- | Get the named account from a ledger. ledgerAccount :: Ledger -> AccountName -> Maybe Account ledgerAccount l a = lookupAccount a $ laccounts l -- | Get this ledger's root account, which is a dummy "root" account -- above all others. This should always be first in the account list, -- if somehow not this returns a null account. ledgerRootAccount :: Ledger -> Account ledgerRootAccount = headDef nullacct . laccounts -- | List a ledger's top-level accounts (the ones below the root), in tree order. ledgerTopAccounts :: Ledger -> [Account] ledgerTopAccounts = asubs . head . laccounts -- | List a ledger's bottom-level (subaccount-less) accounts, in tree order. ledgerLeafAccounts :: Ledger -> [Account] ledgerLeafAccounts = filter (null.asubs) . laccounts -- | List a ledger's postings, in the order parsed. ledgerPostings :: Ledger -> [Posting] ledgerPostings = journalPostings . ljournal -- | The (fully specified) date span containing all the ledger's (filtered) transactions, -- or DateSpan Nothing Nothing if there are none. 
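-- Eg (illustrative): if the earliest transaction date used in the journal is
-- 2023-01-05 and the latest is 2023-03-10, the result is the end-exclusive span
-- 2023-01-05..2023-03-11.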
ledgerDateSpan :: Ledger -> DateSpan ledgerDateSpan = journalDateSpanBothDates . ljournal -- | All commodities used in this ledger. ledgerCommodities :: Ledger -> [CommoditySymbol] ledgerCommodities = M.keys . jinferredcommodities . ljournal -- tests tests_Ledger = testGroup "Ledger" [ testCase "ledgerFromJournal" $ do length (ledgerPostings $ ledgerFromJournal Any nulljournal) @?= 0 length (ledgerPostings $ ledgerFromJournal Any samplejournal) @?= 13 length (ledgerPostings $ ledgerFromJournal (Depth 2) samplejournal) @?= 7 ] hledger-lib-1.30/Hledger/Data/Period.hs0000644000000000000000000003334214434445206016015 0ustar0000000000000000{-| Manipulate the time periods typically used for reports with Period, a richer abstraction than DateSpan. See also Types and Dates. -} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Period ( periodAsDateSpan ,dateSpanAsPeriod ,simplifyPeriod ,isLastDayOfMonth ,isStandardPeriod ,periodTextWidth ,showPeriod ,showPeriodMonthAbbrev ,periodStart ,periodEnd ,periodNext ,periodPrevious ,periodNextIn ,periodPreviousIn ,periodMoveTo ,periodGrow ,periodShrink ,mondayBefore ,yearMonthContainingWeekStarting ,quarterContainingMonth ,firstMonthOfQuarter ,startOfFirstWeekInMonth ) where import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar import Data.Time.Calendar.MonthDay import Data.Time.Calendar.OrdinalDate import Data.Time.Calendar.WeekDate import Data.Time.Format import Text.Printf import Hledger.Data.Types -- | Convert Periods to exact DateSpans. -- -- >>> periodAsDateSpan (MonthPeriod 2000 1) == DateSpan (Just $ Flex $ fromGregorian 2000 1 1) (Just $ Flex $ fromGregorian 2000 2 1) -- True periodAsDateSpan :: Period -> DateSpan periodAsDateSpan (DayPeriod d) = DateSpan (Just $ Exact d) (Just $ Exact $ addDays 1 d) periodAsDateSpan (WeekPeriod b) = DateSpan (Just $ Flex b) (Just $ Flex $ addDays 7 b) periodAsDateSpan (MonthPeriod y m) = DateSpan (Just $ Flex $ fromGregorian y m 1) (Just $ Flex $ fromGregorian y' m' 1) where (y',m') | m==12 = (y+1,1) | otherwise = (y,m+1) periodAsDateSpan (QuarterPeriod y q) = DateSpan (Just $ Flex $ fromGregorian y m 1) (Just $ Flex $ fromGregorian y' m' 1) where (y', q') | q==4 = (y+1,1) | otherwise = (y,q+1) quarterAsMonth q2 = (q2-1) * 3 + 1 m = quarterAsMonth q m' = quarterAsMonth q' periodAsDateSpan (YearPeriod y) = DateSpan (Just $ Flex $ fromGregorian y 1 1) (Just $ Flex $ fromGregorian (y+1) 1 1) periodAsDateSpan (PeriodBetween b e) = DateSpan (Just $ Exact b) (Just $ Exact e) periodAsDateSpan (PeriodFrom b) = DateSpan (Just $ Exact b) Nothing periodAsDateSpan (PeriodTo e) = DateSpan Nothing (Just $ Exact e) periodAsDateSpan (PeriodAll) = DateSpan Nothing Nothing -- | Convert DateSpans to Periods. -- -- >>> dateSpanAsPeriod $ DateSpan (Just $ Exact $ fromGregorian 2000 1 1) (Just $ Exact $ fromGregorian 2000 2 1) -- MonthPeriod 2000 1 dateSpanAsPeriod :: DateSpan -> Period dateSpanAsPeriod (DateSpan (Just b) (Just e)) = simplifyPeriod $ PeriodBetween (fromEFDay b) (fromEFDay e) dateSpanAsPeriod (DateSpan (Just b) Nothing) = PeriodFrom (fromEFDay b) dateSpanAsPeriod (DateSpan Nothing (Just e)) = PeriodTo (fromEFDay e) dateSpanAsPeriod (DateSpan Nothing Nothing) = PeriodAll -- | Convert PeriodBetweens to a more abstract period where possible. 
-- -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 1 1 1) (fromGregorian 2 1 1) -- YearPeriod 1 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 10 1) (fromGregorian 2001 1 1) -- QuarterPeriod 2000 4 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 1) (fromGregorian 2000 3 1) -- MonthPeriod 2000 2 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2016 7 25) (fromGregorian 2016 8 1) -- WeekPeriod 2016-07-25 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 1 1) (fromGregorian 2000 1 2) -- DayPeriod 2000-01-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 28) (fromGregorian 2000 3 1) -- PeriodBetween 2000-02-28 2000-03-01 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 2 29) (fromGregorian 2000 3 1) -- DayPeriod 2000-02-29 -- >>> simplifyPeriod $ PeriodBetween (fromGregorian 2000 12 31) (fromGregorian 2001 1 1) -- DayPeriod 2000-12-31 -- simplifyPeriod :: Period -> Period simplifyPeriod (PeriodBetween b e) = case (toGregorian b, toGregorian e) of -- a year ((by,1,1), (ey,1,1)) | by+1==ey -> YearPeriod by -- a half-year -- ((by,1,1), (ey,7,1)) | by==ey -> -- ((by,7,1), (ey,1,1)) | by+1==ey -> -- a quarter ((by,1,1), (ey,4,1)) | by==ey -> QuarterPeriod by 1 ((by,4,1), (ey,7,1)) | by==ey -> QuarterPeriod by 2 ((by,7,1), (ey,10,1)) | by==ey -> QuarterPeriod by 3 ((by,10,1), (ey,1,1)) | by+1==ey -> QuarterPeriod by 4 -- a month ((by,bm,1), (ey,em,1)) | by==ey && bm+1==em -> MonthPeriod by bm ((by,12,1), (ey,1,1)) | by+1==ey -> MonthPeriod by 12 -- a week (two successive mondays), -- YYYYwN ("week N of year YYYY") -- _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate from, toWeekDate to) in by==ey && fw+1==tw && bd==1 && ed==1 -> -- a week starting on a monday _ | let ((by,bw,bd), (ey,ew,ed)) = (toWeekDate b, toWeekDate (addDays (-1) e)) in by==ey && bw==ew && bd==1 && ed==7 -> WeekPeriod b -- a day ((by,bm,bd), (ey,em,ed)) | (by==ey && bm==em && bd+1==ed) || (by+1==ey && bm==12 && em==1 && bd==31 && ed==1) || -- crossing a year boundary (by==ey && bm+1==em && isLastDayOfMonth by bm bd && ed==1) -- crossing a month boundary -> DayPeriod b _ -> PeriodBetween b e simplifyPeriod p = p isLastDayOfMonth y m d = case m of 1 -> d==31 2 | isLeapYear y -> d==29 | otherwise -> d==28 3 -> d==31 4 -> d==30 5 -> d==31 6 -> d==30 7 -> d==31 8 -> d==31 9 -> d==30 10 -> d==31 11 -> d==30 12 -> d==31 _ -> False -- | Is this period a "standard" period, referencing a particular day, week, month, quarter, or year ? -- Periods of other durations, or infinite duration, or not starting on a standard period boundary, are not. isStandardPeriod = isStandardPeriod' . simplifyPeriod where isStandardPeriod' (DayPeriod _) = True isStandardPeriod' (WeekPeriod _) = True isStandardPeriod' (MonthPeriod _ _) = True isStandardPeriod' (QuarterPeriod _ _) = True isStandardPeriod' (YearPeriod _) = True isStandardPeriod' _ = False -- | The width of a period of this type when displayed. periodTextWidth :: Period -> Int periodTextWidth = periodTextWidth' . simplifyPeriod where periodTextWidth' DayPeriod{} = 10 -- 2021-01-01 periodTextWidth' WeekPeriod{} = 13 -- 2021-01-01W52 periodTextWidth' MonthPeriod{} = 7 -- 2021-01 periodTextWidth' QuarterPeriod{} = 6 -- 2021Q1 periodTextWidth' YearPeriod{} = 4 -- 2021 periodTextWidth' PeriodBetween{} = 22 -- 2021-01-01..2021-01-07 periodTextWidth' PeriodFrom{} = 12 -- 2021-01-01.. periodTextWidth' PeriodTo{} = 12 -- ..2021-01-01 periodTextWidth' PeriodAll = 2 -- .. -- | Render a period as a compact display string suitable for user output. 
-- -- >>> showPeriod (WeekPeriod (fromGregorian 2016 7 25)) -- "2016-07-25W30" showPeriod :: Period -> Text showPeriod (DayPeriod b) = T.pack $ formatTime defaultTimeLocale "%F" b -- DATE showPeriod (WeekPeriod b) = T.pack $ formatTime defaultTimeLocale "%FW%V" b -- STARTDATEWYEARWEEK showPeriod (MonthPeriod y m) = T.pack $ printf "%04d-%02d" y m -- YYYY-MM showPeriod (QuarterPeriod y q) = T.pack $ printf "%04dQ%d" y q -- YYYYQN showPeriod (YearPeriod y) = T.pack $ printf "%04d" y -- YYYY showPeriod (PeriodBetween b e) = T.pack $ formatTime defaultTimeLocale "%F" b ++ formatTime defaultTimeLocale "..%F" (addDays (-1) e) -- STARTDATE..INCLUSIVEENDDATE showPeriod (PeriodFrom b) = T.pack $ formatTime defaultTimeLocale "%F.." b -- STARTDATE.. showPeriod (PeriodTo e) = T.pack $ formatTime defaultTimeLocale "..%F" (addDays (-1) e) -- ..INCLUSIVEENDDATE showPeriod PeriodAll = ".." -- | Like showPeriod, but if it's a month period show just -- the 3 letter month name abbreviation for the current locale. showPeriodMonthAbbrev :: Period -> Text showPeriodMonthAbbrev (MonthPeriod _ m) -- Jan | m > 0 && m <= length monthnames = T.pack . snd $ monthnames !! (m-1) where monthnames = months defaultTimeLocale showPeriodMonthAbbrev p = showPeriod p periodStart :: Period -> Maybe Day periodStart p = fromEFDay <$> mb where DateSpan mb _ = periodAsDateSpan p periodEnd :: Period -> Maybe Day periodEnd p = fromEFDay <$> me where DateSpan _ me = periodAsDateSpan p -- | Move a standard period to the following period of same duration. -- Non-standard periods are unaffected. periodNext :: Period -> Period periodNext (DayPeriod b) = DayPeriod (addDays 1 b) periodNext (WeekPeriod b) = WeekPeriod (addDays 7 b) periodNext (MonthPeriod y 12) = MonthPeriod (y+1) 1 periodNext (MonthPeriod y m) = MonthPeriod y (m+1) periodNext (QuarterPeriod y 4) = QuarterPeriod (y+1) 1 periodNext (QuarterPeriod y q) = QuarterPeriod y (q+1) periodNext (YearPeriod y) = YearPeriod (y+1) periodNext p = p -- | Move a standard period to the preceding period of same duration. -- Non-standard periods are unaffected. periodPrevious :: Period -> Period periodPrevious (DayPeriod b) = DayPeriod (addDays (-1) b) periodPrevious (WeekPeriod b) = WeekPeriod (addDays (-7) b) periodPrevious (MonthPeriod y 1) = MonthPeriod (y-1) 12 periodPrevious (MonthPeriod y m) = MonthPeriod y (m-1) periodPrevious (QuarterPeriod y 1) = QuarterPeriod (y-1) 4 periodPrevious (QuarterPeriod y q) = QuarterPeriod y (q-1) periodPrevious (YearPeriod y) = YearPeriod (y-1) periodPrevious p = p -- | Move a standard period to the following period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodNextIn :: DateSpan -> Period -> Period periodNextIn (DateSpan _ (Just e0)) p = case mb of Just b -> if b < e then p' else p _ -> p where e = fromEFDay e0 p' = periodNext p mb = periodStart p' periodNextIn _ p = periodNext p -- | Move a standard period to the preceding period of same duration, staying within enclosing dates. -- Non-standard periods are unaffected. periodPreviousIn :: DateSpan -> Period -> Period periodPreviousIn (DateSpan (Just b0) _) p = case me of Just e -> if e > b then p' else p _ -> p where b = fromEFDay b0 p' = periodPrevious p me = periodEnd p' periodPreviousIn _ p = periodPrevious p -- | Move a standard period stepwise so that it encloses the given date. -- Non-standard periods are unaffected. 
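-- A couple of illustrative examples (outputs assume Period's derived Show, as in
-- the examples above):
--
-- >>> periodMoveTo (fromGregorian 2023 7 15) (MonthPeriod 2023 1)
-- MonthPeriod 2023 7
-- >>> periodMoveTo (fromGregorian 2023 7 15) (YearPeriod 2020)
-- YearPeriod 2023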
periodMoveTo :: Day -> Period -> Period periodMoveTo d (DayPeriod _) = DayPeriod d periodMoveTo d (WeekPeriod _) = WeekPeriod $ mondayBefore d periodMoveTo d (MonthPeriod _ _) = MonthPeriod y m where (y,m,_) = toGregorian d periodMoveTo d (QuarterPeriod _ _) = QuarterPeriod y q where (y,m,_) = toGregorian d q = quarterContainingMonth m periodMoveTo d (YearPeriod _) = YearPeriod y where (y,_,_) = toGregorian d periodMoveTo _ p = p -- | Enlarge a standard period to the next larger enclosing standard period, if there is one. -- Eg, a day becomes the enclosing week. -- A week becomes whichever month the week's thursday falls into. -- A year becomes all (unlimited). -- Non-standard periods (arbitrary dates, or open-ended) are unaffected. periodGrow :: Period -> Period periodGrow (DayPeriod b) = WeekPeriod $ mondayBefore b periodGrow (WeekPeriod b) = MonthPeriod y m where (y,m) = yearMonthContainingWeekStarting b periodGrow (MonthPeriod y m) = QuarterPeriod y (quarterContainingMonth m) periodGrow (QuarterPeriod y _) = YearPeriod y periodGrow (YearPeriod _) = PeriodAll periodGrow p = p -- | Shrink a period to the next smaller standard period inside it, -- choosing the subperiod which contains today's date if possible, -- otherwise the first subperiod. It goes like this: -- unbounded periods and nonstandard periods (between two arbitrary dates) -> -- current year -> -- current quarter if it's in selected year, otherwise first quarter of selected year -> -- current month if it's in selected quarter, otherwise first month of selected quarter -> -- current week if it's in selected month, otherwise first week of selected month -> -- today if it's in selected week, otherwise first day of selected week, -- unless that's in previous month, in which case first day of month containing selected week. -- Shrinking a day has no effect. 
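-- Eg (illustrative):
--
-- >>> periodShrink (fromGregorian 2023 8 10) (YearPeriod 2023)
-- QuarterPeriod 2023 3
-- >>> periodShrink (fromGregorian 2023 8 10) (YearPeriod 2020)
-- QuarterPeriod 2020 1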
periodShrink :: Day -> Period -> Period periodShrink _ p@(DayPeriod _) = p periodShrink today (WeekPeriod b) | today >= b && diffDays today b < 7 = DayPeriod today | m /= weekmonth = DayPeriod $ fromGregorian weekyear weekmonth 1 | otherwise = DayPeriod b where (_,m,_) = toGregorian b (weekyear,weekmonth) = yearMonthContainingWeekStarting b periodShrink today (MonthPeriod y m) | (y',m') == (y,m) = WeekPeriod $ mondayBefore today | otherwise = WeekPeriod $ startOfFirstWeekInMonth y m where (y',m',_) = toGregorian today periodShrink today (QuarterPeriod y q) | quarterContainingMonth thismonth == q = MonthPeriod y thismonth | otherwise = MonthPeriod y (firstMonthOfQuarter q) where (_,thismonth,_) = toGregorian today periodShrink today (YearPeriod y) | y == thisyear = QuarterPeriod y thisquarter | otherwise = QuarterPeriod y 1 where (thisyear,thismonth,_) = toGregorian today thisquarter = quarterContainingMonth thismonth periodShrink today _ = YearPeriod y where (y,_,_) = toGregorian today mondayBefore d = addDays (1 - toInteger wd) d where (_,_,wd) = toWeekDate d yearMonthContainingWeekStarting weekstart = (y,m) where thu = addDays 3 weekstart (y,yd) = toOrdinalDate thu (m,_) = dayOfYearToMonthAndDay (isLeapYear y) yd quarterContainingMonth m = (m-1) `div` 3 + 1 firstMonthOfQuarter q = (q-1)*3 + 1 startOfFirstWeekInMonth y m | monthstartday <= 4 = mon | otherwise = addDays 7 mon -- month starts with a fri/sat/sun where monthstart = fromGregorian y m 1 mon = mondayBefore monthstart (_,_,monthstartday) = toWeekDate monthstart hledger-lib-1.30/Hledger/Data/PeriodicTransaction.hs0000644000000000000000000002356614434445206020546 0ustar0000000000000000{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-| A 'PeriodicTransaction' is a rule describing recurring transactions. -} module Hledger.Data.PeriodicTransaction ( runPeriodicTransaction , checkPeriodicTransactionStartDate ) where import Data.Function ((&)) import Data.Maybe (isNothing) import qualified Data.Text as T import qualified Data.Text.IO as T import Text.Printf import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Posting (post, commentAddTagNextLine) import Hledger.Data.Transaction -- $setup -- >>> :set -XOverloadedStrings -- >>> import Hledger.Data.Posting -- >>> import Hledger.Data.Journal -- doctest helper, too much hassle to define in the comment -- XXX duplicates some logic in periodictransactionp _ptgen str = do let t = T.pack str (i,s) = parsePeriodExpr' nulldate t mapM_ (T.putStr . showTransaction) $ runPeriodicTransaction True nullperiodictransaction{ ptperiodexpr=t , ptspan=s, ptinterval=i, ptpostings=["a" `post` usd 1] } nulldatespan _ptgenspan str spn = do let t = T.pack str (i,s) = parsePeriodExpr' nulldate t mapM_ (T.putStr . 
showTransaction) $ runPeriodicTransaction True nullperiodictransaction{ ptperiodexpr=t , ptspan=s, ptinterval=i, ptpostings=["a" `post` usd 1] } spn --deriving instance Show PeriodicTransaction -- for better pretty-printing: instance Show PeriodicTransaction where show PeriodicTransaction{..} = printf "PeriodicTransactionPP {%s, %s, %s, %s, %s, %s, %s, %s, %s, %s}" -- Warning, be careful to keep these synced ^ v ("ptperiodexpr=" ++ show ptperiodexpr) ("ptinterval=" ++ show ptinterval) ("ptspan=" ++ show (show ptspan)) ("ptsourcepos=" ++ show ptsourcepos) ("ptstatus=" ++ show (show ptstatus)) ("ptcode=" ++ show ptcode) ("ptdescription=" ++ show ptdescription) ("ptcomment=" ++ show ptcomment) ("pttags=" ++ show pttags) ("ptpostings=" ++ show ptpostings) -- A basic human-readable rendering. --showPeriodicTransaction t = "~ " ++ T.unpack (ptperiodexpr t) ++ "\n" ++ unlines (map show (ptpostings t)) --nullperiodictransaction is defined in Types.hs -- | Generate transactions from 'PeriodicTransaction' within a 'DateSpan'. -- This should be a closed span with both start and end dates specified; -- an open ended span will generate no transactions. -- -- Note that new transactions require 'txnTieKnot' post-processing. -- The new transactions will have three tags added: -- - a recur:PERIODICEXPR tag whose value is the generating periodic expression -- - a generated-transaction: tag -- - a hidden _generated-transaction: tag which does not appear in the comment. -- -- >>> import Data.Time (fromGregorian) -- >>> _ptgen "monthly from 2017/1 to 2017/4" -- 2017-01-01 -- ; generated-transaction: ~ monthly from 2017/1 to 2017/4 -- a $1.00 -- -- 2017-02-01 -- ; generated-transaction: ~ monthly from 2017/1 to 2017/4 -- a $1.00 -- -- 2017-03-01 -- ; generated-transaction: ~ monthly from 2017/1 to 2017/4 -- a $1.00 -- -- -- >>> _ptgen "monthly from 2017/1 to 2017/5" -- 2017-01-01 -- ; generated-transaction: ~ monthly from 2017/1 to 2017/5 -- a $1.00 -- -- 2017-02-01 -- ; generated-transaction: ~ monthly from 2017/1 to 2017/5 -- a $1.00 -- -- 2017-03-01 -- ; generated-transaction: ~ monthly from 2017/1 to 2017/5 -- a $1.00 -- -- 2017-04-01 -- ; generated-transaction: ~ monthly from 2017/1 to 2017/5 -- a $1.00 -- -- -- >>> _ptgen "every 2nd day of month from 2017/02 to 2017/04" -- 2017-01-02 -- ; generated-transaction: ~ every 2nd day of month from 2017/02 to 2017/04 -- a $1.00 -- -- 2017-02-02 -- ; generated-transaction: ~ every 2nd day of month from 2017/02 to 2017/04 -- a $1.00 -- -- 2017-03-02 -- ; generated-transaction: ~ every 2nd day of month from 2017/02 to 2017/04 -- a $1.00 -- -- -- >>> _ptgen "every 30th day of month from 2017/1 to 2017/5" -- 2016-12-30 -- ; generated-transaction: ~ every 30th day of month from 2017/1 to 2017/5 -- a $1.00 -- -- 2017-01-30 -- ; generated-transaction: ~ every 30th day of month from 2017/1 to 2017/5 -- a $1.00 -- -- 2017-02-28 -- ; generated-transaction: ~ every 30th day of month from 2017/1 to 2017/5 -- a $1.00 -- -- 2017-03-30 -- ; generated-transaction: ~ every 30th day of month from 2017/1 to 2017/5 -- a $1.00 -- -- 2017-04-30 -- ; generated-transaction: ~ every 30th day of month from 2017/1 to 2017/5 -- a $1.00 -- -- -- >>> _ptgen "every 2nd Thursday of month from 2017/1 to 2017/4" -- 2016-12-08 -- ; generated-transaction: ~ every 2nd Thursday of month from 2017/1 to 2017/4 -- a $1.00 -- -- 2017-01-12 -- ; generated-transaction: ~ every 2nd Thursday of month from 2017/1 to 2017/4 -- a $1.00 -- -- 2017-02-09 -- ; generated-transaction: ~ every 2nd Thursday of month 
from 2017/1 to 2017/4 -- a $1.00 -- -- 2017-03-09 -- ; generated-transaction: ~ every 2nd Thursday of month from 2017/1 to 2017/4 -- a $1.00 -- -- -- >>> _ptgen "every nov 29th from 2017 to 2019" -- 2016-11-29 -- ; generated-transaction: ~ every nov 29th from 2017 to 2019 -- a $1.00 -- -- 2017-11-29 -- ; generated-transaction: ~ every nov 29th from 2017 to 2019 -- a $1.00 -- -- 2018-11-29 -- ; generated-transaction: ~ every nov 29th from 2017 to 2019 -- a $1.00 -- -- -- >>> _ptgen "2017/1" -- 2017-01-01 -- ; generated-transaction: ~ 2017/1 -- a $1.00 -- -- -- >>> let reportperiod="daily from 2018/01/03" in let (i,s) = parsePeriodExpr' nulldate reportperiod in runPeriodicTransaction True (nullperiodictransaction{ptperiodexpr=reportperiod, ptspan=s, ptinterval=i, ptpostings=["a" `post` usd 1]}) (DateSpan (Just $ Flex $ fromGregorian 2018 01 01) (Just $ Flex $ fromGregorian 2018 01 03)) -- [] -- -- >>> _ptgenspan "every 3 months from 2019-05" (DateSpan (Just $ Flex $ fromGregorian 2020 01 01) (Just $ Flex $ fromGregorian 2020 02 01)) -- -- >>> _ptgenspan "every 3 months from 2019-05" (DateSpan (Just $ Flex $ fromGregorian 2020 02 01) (Just $ Flex $ fromGregorian 2020 03 01)) -- 2020-02-01 -- ; generated-transaction: ~ every 3 months from 2019-05 -- a $1.00 -- -- >>> _ptgenspan "every 3 days from 2018" (DateSpan (Just $ Flex $ fromGregorian 2018 01 01) (Just $ Flex $ fromGregorian 2018 01 05)) -- 2018-01-01 -- ; generated-transaction: ~ every 3 days from 2018 -- a $1.00 -- -- 2018-01-04 -- ; generated-transaction: ~ every 3 days from 2018 -- a $1.00 -- -- >>> _ptgenspan "every 3 days from 2018" (DateSpan (Just $ Flex $ fromGregorian 2018 01 02) (Just $ Flex $ fromGregorian 2018 01 05)) -- 2018-01-04 -- ; generated-transaction: ~ every 3 days from 2018 -- a $1.00 -- runPeriodicTransaction :: Bool -> PeriodicTransaction -> DateSpan -> [Transaction] runPeriodicTransaction verbosetags PeriodicTransaction{..} requestedspan = [ t{tdate=d} | (DateSpan (Just efd) _) <- alltxnspans, let d = fromEFDay efd, spanContainsDate requestedspan d ] where t = nulltransaction{ tsourcepos = ptsourcepos ,tstatus = ptstatus ,tcode = ptcode ,tdescription = ptdescription ,tcomment = ptcomment & (if verbosetags then (`commentAddTagNextLine` ("generated-transaction",period)) else id) ,ttags = pttags & (("_generated-transaction",period) :) & (if verbosetags then (("generated-transaction" ,period) :) else id) ,tpostings = ptpostings } period = "~ " <> ptperiodexpr -- All the date spans described by this periodic transaction rule. alltxnspans = splitSpan adjust ptinterval span' where -- If the PT does not specify start or end dates, we take them from the requestedspan. span' = ptspan `spanDefaultsFrom` requestedspan -- Unless the PT specified a start date explicitly, we will adjust the start date to the previous interval boundary. adjust = isNothing $ spanStart span' -- | Check that this date span begins at a boundary of this interval, -- or return an explanatory error message including the provided period expression -- (from which the span and interval are derived). 
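-- Eg (illustrative): a periodic rule like "~ monthly from 2023-01-15" would be
-- rejected here, since 2023-01-15 is not the first day of a month, while
-- "~ monthly from 2023-01-01" yields Nothing (no error).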
checkPeriodicTransactionStartDate :: Interval -> DateSpan -> T.Text -> Maybe String checkPeriodicTransactionStartDate i s periodexpr = case (i, spanStart s) of (Weeks _, Just d) -> checkStart d Week (Months _, Just d) -> checkStart d Month (Quarters _, Just d) -> checkStart d Quarter (Years _, Just d) -> checkStart d Year _ -> Nothing where checkStart d x = let firstDate = fromEFDay $ fixSmartDate d $ SmartRelative 0 x in if d == firstDate then Nothing else Just $ "Unable to generate transactions according to "++show (T.unpack periodexpr) ++" because "++show d++" is not a first day of the "++show x ---- | What is the interval of this 'PeriodicTransaction's period expression, if it can be parsed ? --periodTransactionInterval :: PeriodicTransaction -> Maybe Interval --periodTransactionInterval pt = -- let -- expr = ptperiodexpr pt -- err = error' $ "Current date cannot be referenced in " ++ show (T.unpack expr) -- in -- case parsePeriodExpr err expr of -- Left _ -> Nothing -- Right (i,_) -> Just i hledger-lib-1.30/Hledger/Data/StringFormat.hs0000644000000000000000000002247714434445206017221 0ustar0000000000000000-- | Parse format strings provided by --format, with awareness of -- hledger's report item fields. The formats are used by -- report-specific renderers like renderBalanceReportItem. {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE TypeFamilies #-} module Hledger.Data.StringFormat ( parseStringFormat , defaultStringFormatStyle , StringFormat(..) , StringFormatComponent(..) , ReportItemField(..) , defaultBalanceLineFormat , tests_StringFormat ) where import Numeric (readDec) import Data.Char (isPrint) import Data.Default (Default(..)) import Data.Maybe (isJust) import Data.Text (Text) import qualified Data.Text as T import Text.Megaparsec import Text.Megaparsec.Char (char, digitChar, string) import Hledger.Utils.Parse (SimpleTextParser) import Hledger.Utils.Text (formatText) import Hledger.Utils.Test -- | A format specification/template to use when rendering a report line item as text. -- -- A format is a sequence of components; each is either a literal -- string, or a hledger report item field with specified width and -- justification whose value will be interpolated at render time. -- -- A component's value may be a multi-line string (or a -- multi-commodity amount), in which case the final string will be -- either single-line or a top or bottom-aligned multi-line string -- depending on the StringFormat variant used. -- -- Currently this is only used in the balance command's single-column -- mode, which provides a limited StringFormat renderer. -- data StringFormat = OneLine [StringFormatComponent] -- ^ multi-line values will be rendered on one line, comma-separated | TopAligned [StringFormatComponent] -- ^ values will be top-aligned (and bottom-padded to the same height) | BottomAligned [StringFormatComponent] -- ^ values will be bottom-aligned (and top-padded) deriving (Show, Eq) data StringFormatComponent = FormatLiteral Text -- ^ Literal text to be rendered as-is | FormatField Bool (Maybe Int) (Maybe Int) ReportItemField -- ^ A data field to be formatted and interpolated. Parameters: -- -- - Left justify ? Right justified if false -- - Minimum width ? Will be space-padded if narrower than this -- - Maximum width ? Will be clipped if wider than this -- - Which of the standard hledger report item fields to interpolate deriving (Show, Eq) -- | An id identifying which report item field to interpolate. 
These -- are drawn from several hledger report types, so are not all -- applicable for a given report. data ReportItemField = AccountField -- ^ A posting or balance report item's account name | DefaultDateField -- ^ A posting or register or entry report item's date | DescriptionField -- ^ A posting or register or entry report item's description | TotalField -- ^ A balance or posting report item's balance or running total. -- Always rendered right-justified. | DepthSpacerField -- ^ A balance report item's indent level (which may be different from the account name depth). -- Rendered as this number of spaces, multiplied by the minimum width spec if any. | FieldNo Int -- ^ A report item's nth field. May be unimplemented. deriving (Show, Eq) instance Default StringFormat where def = defaultBalanceLineFormat -- | Default line format for balance report: "%20(total) %2(depth_spacer)%-(account)" defaultBalanceLineFormat :: StringFormat defaultBalanceLineFormat = BottomAligned [ FormatField False (Just 20) Nothing TotalField , FormatLiteral " " , FormatField True (Just 2) Nothing DepthSpacerField , FormatField True Nothing Nothing AccountField ] ---------------------------------------------------------------------- -- renderStringFormat :: StringFormat -> Map String String -> String -- renderStringFormat fmt params = ---------------------------------------------------------------------- -- | Parse a string format specification, or return a parse error. parseStringFormat :: Text -> Either String StringFormat parseStringFormat input = case (runParser (stringformatp <* eof) "(unknown)") input of Left y -> Left $ show y Right x -> Right x defaultStringFormatStyle = BottomAligned stringformatp :: SimpleTextParser StringFormat stringformatp = do alignspec <- optional (try $ char '%' >> oneOf ("^_,"::String)) let constructor = case alignspec of Just '^' -> TopAligned Just '_' -> BottomAligned Just ',' -> OneLine _ -> defaultStringFormatStyle constructor <$> many componentp componentp :: SimpleTextParser StringFormatComponent componentp = formatliteralp <|> formatfieldp formatliteralp :: SimpleTextParser StringFormatComponent formatliteralp = do s <- T.pack <$> some c return $ FormatLiteral s where isPrintableButNotPercentage x = isPrint x && x /= '%' c = (satisfy isPrintableButNotPercentage "printable character") <|> try (string "%%" >> return '%') formatfieldp :: SimpleTextParser StringFormatComponent formatfieldp = do char '%' leftJustified <- optional (char '-') minWidth <- optional (some $ digitChar) maxWidth <- optional (do char '.'; some $ digitChar) -- TODO: Can this be (char '1') *> (some digitChar) char '(' f <- fieldp char ')' return $ FormatField (isJust leftJustified) (parseDec minWidth <|> Just 0) (parseDec maxWidth) f where parseDec s = case s of Just text -> Just m where ((m,_):_) = readDec text _ -> Nothing fieldp :: SimpleTextParser ReportItemField fieldp = do try (string "account" >> return AccountField) <|> try (string "depth_spacer" >> return DepthSpacerField) <|> try (string "date" >> return DescriptionField) <|> try (string "description" >> return DescriptionField) <|> try (string "total" >> return TotalField) <|> try ((FieldNo . 
read) <$> some digitChar) ---------------------------------------------------------------------- formatStringTester fs value expected = actual @?= expected where actual = case fs of FormatLiteral l -> formatText False Nothing Nothing l FormatField leftJustify mn mx _ -> formatText leftJustify mn mx value tests_StringFormat = testGroup "StringFormat" [ testCase "formatStringHelper" $ do formatStringTester (FormatLiteral " ") "" " " formatStringTester (FormatField False Nothing Nothing DescriptionField) "description" "description" formatStringTester (FormatField False (Just 20) Nothing DescriptionField) "description" " description" formatStringTester (FormatField False Nothing (Just 20) DescriptionField) "description" "description" formatStringTester (FormatField True Nothing (Just 20) DescriptionField) "description" "description" formatStringTester (FormatField True (Just 20) Nothing DescriptionField) "description" "description " formatStringTester (FormatField True (Just 20) (Just 20) DescriptionField) "description" "description " formatStringTester (FormatField True Nothing (Just 3) DescriptionField) "description" "des" ,let s `gives` expected = testCase s $ parseStringFormat (T.pack s) @?= Right expected in testGroup "parseStringFormat" [ "" `gives` (defaultStringFormatStyle []) , "D" `gives` (defaultStringFormatStyle [FormatLiteral "D"]) , "%(date)" `gives` (defaultStringFormatStyle [FormatField False (Just 0) Nothing DescriptionField]) , "%(total)" `gives` (defaultStringFormatStyle [FormatField False (Just 0) Nothing TotalField]) -- TODO -- , "^%(total)" `gives` (TopAligned [FormatField False Nothing Nothing TotalField]) -- , "_%(total)" `gives` (BottomAligned [FormatField False Nothing Nothing TotalField]) -- , ",%(total)" `gives` (OneLine [FormatField False Nothing Nothing TotalField]) , "Hello %(date)!" `gives` (defaultStringFormatStyle [FormatLiteral "Hello ", FormatField False (Just 0) Nothing DescriptionField, FormatLiteral "!"]) , "%-(date)" `gives` (defaultStringFormatStyle [FormatField True (Just 0) Nothing DescriptionField]) , "%20(date)" `gives` (defaultStringFormatStyle [FormatField False (Just 20) Nothing DescriptionField]) , "%.10(date)" `gives` (defaultStringFormatStyle [FormatField False (Just 0) (Just 10) DescriptionField]) , "%20.10(date)" `gives` (defaultStringFormatStyle [FormatField False (Just 20) (Just 10) DescriptionField]) , "%20(account) %.10(total)" `gives` (defaultStringFormatStyle [FormatField False (Just 20) Nothing AccountField ,FormatLiteral " " ,FormatField False (Just 0) (Just 10) TotalField ]) , testCase "newline not parsed" $ assertLeft $ parseStringFormat "\n" ] ] hledger-lib-1.30/Hledger/Data/Posting.hs0000644000000000000000000005404014434445206016214 0ustar0000000000000000{-# LANGUAGE NamedFieldPuns #-} {-| A 'Posting' represents a change (by some 'MixedAmount') of the balance in some 'Account'. Each 'Transaction' contains two or more postings which should add up to 0. Postings reference their parent transaction, so we can look up the date or description there. 
-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Posting ( -- * Posting nullposting, posting, post, vpost, post', vpost', nullsourcepos, nullassertion, balassert, balassertTot, balassertParInc, balassertTotInc, -- * operations originalPosting, postingStatus, isReal, isVirtual, isBalancedVirtual, isEmptyPosting, hasBalanceAssignment, hasAmount, postingAllTags, transactionAllTags, relatedPostings, postingStripPrices, postingApplyAliases, postingApplyCommodityStyles, postingAddTags, -- * date operations postingDate, postingDate2, postingDateOrDate2, isPostingInDateSpan, isPostingInDateSpan', -- * account name operations accountNamesFromPostings, -- * comment/tag operations commentJoin, commentAddTag, commentAddTagNextLine, -- * arithmetic sumPostings, -- * rendering showPosting, showPostingLines, postingAsLines, postingsAsLines, showAccountName, renderCommentLines, showBalanceAssertion, -- * misc. postingTransformAmount, postingApplyValuation, postingToCost, postingAddInferredEquityPostings, postingPriceDirectivesFromCost, tests_Posting ) where import Data.Default (def) import Data.Foldable (asum) import Data.Function ((&)) import qualified Data.Map as M import Data.Maybe (fromMaybe, isJust, mapMaybe) import Data.List (foldl', sort, union) import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Data.Time.Calendar (Day) import Safe (maximumBound) import Text.DocLayout (realLength) import Text.Tabular.AsciiWide hiding (render) import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Amount import Hledger.Data.AccountName import Hledger.Data.Dates (nulldate, spanContainsDate) import Hledger.Data.Valuation nullposting, posting :: Posting nullposting = Posting {pdate=Nothing ,pdate2=Nothing ,pstatus=Unmarked ,paccount="" ,pamount=nullmixedamt ,pcomment="" ,ptype=RegularPosting ,ptags=[] ,pbalanceassertion=Nothing ,ptransaction=Nothing ,poriginal=Nothing } posting = nullposting -- constructors -- | Make a posting to an account. post :: AccountName -> Amount -> Posting post acc amt = posting {paccount=acc, pamount=mixedAmount amt} -- | Make a virtual (unbalanced) posting to an account. vpost :: AccountName -> Amount -> Posting vpost acc amt = (post acc amt){ptype=VirtualPosting} -- | Make a posting to an account, maybe with a balance assertion. post' :: AccountName -> Amount -> Maybe BalanceAssertion -> Posting post' acc amt ass = posting {paccount=acc, pamount=mixedAmount amt, pbalanceassertion=ass} -- | Make a virtual (unbalanced) posting to an account, maybe with a balance assertion. vpost' :: AccountName -> Amount -> Maybe BalanceAssertion -> Posting vpost' acc amt ass = (post' acc amt ass){ptype=VirtualPosting, pbalanceassertion=ass} nullsourcepos :: (SourcePos, SourcePos) nullsourcepos = (SourcePos "" (mkPos 1) (mkPos 1), SourcePos "" (mkPos 2) (mkPos 1)) nullassertion :: BalanceAssertion nullassertion = BalanceAssertion {baamount=nullamt ,batotal=False ,bainclusive=False ,baposition=initialPos "" } -- | Make a partial, exclusive balance assertion. balassert :: Amount -> Maybe BalanceAssertion balassert amt = Just $ nullassertion{baamount=amt} -- | Make a total, exclusive balance assertion. balassertTot :: Amount -> Maybe BalanceAssertion balassertTot amt = Just $ nullassertion{baamount=amt, batotal=True} -- | Make a partial, inclusive balance assertion. 
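-- Eg (illustrative): post' "assets:bank:savings" (usd 500) (balassertParInc $ usd 500)
-- builds a posting asserting that this account together with its subaccounts
-- holds $500 (in USD only, since the assertion is partial rather than total).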
balassertParInc :: Amount -> Maybe BalanceAssertion balassertParInc amt = Just $ nullassertion{baamount=amt, bainclusive=True} -- | Make a total, inclusive balance assertion. balassertTotInc :: Amount -> Maybe BalanceAssertion balassertTotInc amt = Just $ nullassertion{baamount=amt, batotal=True, bainclusive=True} -- | Render a balance assertion, as the =[=][*] symbol and expected amount. showBalanceAssertion :: BalanceAssertion -> WideBuilder showBalanceAssertion ba = singleton '=' <> eq <> ast <> singleton ' ' <> showAmountB def{displayZeroCommodity=True} (baamount ba) where eq = if batotal ba then singleton '=' else mempty ast = if bainclusive ba then singleton '*' else mempty singleton c = WideBuilder (TB.singleton c) 1 -- Get the original posting, if any. originalPosting :: Posting -> Posting originalPosting p = fromMaybe p $ poriginal p showPosting :: Posting -> String showPosting p = T.unpack . T.unlines $ postingsAsLines False [p] -- | Render a posting, at the appropriate width for aligning with -- its siblings if any. Used by the rewrite command. showPostingLines :: Posting -> [Text] showPostingLines p = first3 $ postingAsLines False False maxacctwidth maxamtwidth p where linesWithWidths = map (postingAsLines False False maxacctwidth maxamtwidth) . maybe [p] tpostings $ ptransaction p maxacctwidth = maximumBound 0 $ map second3 linesWithWidths maxamtwidth = maximumBound 0 $ map third3 linesWithWidths -- | Given a transaction and its postings, render the postings, suitable -- for `print` output. Normally this output will be valid journal syntax which -- hledger can reparse (though it may include no-longer-valid balance assertions). -- -- Explicit amounts are shown, any implicit amounts are not. -- -- Postings with multicommodity explicit amounts are handled as follows: -- if onelineamounts is true, these amounts are shown on one line, -- comma-separated, and the output will not be valid journal syntax. -- Otherwise, they are shown as several similar postings, one per commodity. -- When the posting has a balance assertion, it is attached to the last of these postings. -- -- The output will appear to be a balanced transaction. -- Amounts' display precisions, which may have been limited by commodity -- directives, will be increased if necessary to ensure this. -- -- Posting amounts will be aligned with each other, starting about 4 columns -- beyond the widest account name (see postingAsLines for details). postingsAsLines :: Bool -> [Posting] -> [Text] postingsAsLines onelineamounts ps = concatMap first3 linesWithWidths where linesWithWidths = map (postingAsLines False onelineamounts maxacctwidth maxamtwidth) ps maxacctwidth = maximumBound 0 $ map second3 linesWithWidths maxamtwidth = maximumBound 0 $ map third3 linesWithWidths -- | Render one posting, on one or more lines, suitable for `print` output. -- There will be an indented account name, plus one or more of status flag, -- posting amount, balance assertion, same-line comment, next-line comments. -- -- If the posting's amount is implicit or if elideamount is true, no amount is shown. -- -- If the posting's amount is explicit and multi-commodity, multiple similar -- postings are shown, one for each commodity, to help produce parseable journal syntax. -- Or if onelineamounts is true, such amounts are shown on one line, comma-separated -- (and the output will not be valid journal syntax). 
-- -- By default, 4 spaces (2 if there's a status flag) are shown between -- account name and start of amount area, which is typically 12 chars wide -- and contains a right-aligned amount (so 10-12 visible spaces between -- account name and amount is typical). -- When given a list of postings to be aligned with, the whitespace will be -- increased if needed to match the posting with the longest account name. -- This is used to align the amounts of a transaction's postings. -- -- Also returns the account width and amount width used. postingAsLines :: Bool -> Bool -> Int -> Int -> Posting -> ([Text], Int, Int) postingAsLines elideamount onelineamounts acctwidth amtwidth p = (concatMap (++ newlinecomments) postingblocks, thisacctwidth, thisamtwidth) where -- This needs to be converted to strict Text in order to strip trailing -- spaces. This adds a small amount of inefficiency, and the only difference -- is whether there are trailing spaces in print (and related) reports. This -- could be removed and we could just keep everything as a Text Builder, but -- would require adding trailing spaces to 42 failing tests. postingblocks = [map T.stripEnd . T.lines . TL.toStrict $ render [ textCell BottomLeft statusandaccount , textCell BottomLeft " " , Cell BottomLeft [pad amt] , Cell BottomLeft [assertion] , textCell BottomLeft samelinecomment ] | (amt,assertion) <- shownAmountsAssertions] render = renderRow def{tableBorders=False, borderSpaces=False} . Group NoLine . map Header pad amt = WideBuilder (TB.fromText $ T.replicate w " ") w <> amt where w = max 12 amtwidth - wbWidth amt -- min. 12 for backwards compatibility pacctstr p' = showAccountName Nothing (ptype p') (paccount p') pstatusandacct p' = pstatusprefix p' <> pacctstr p' pstatusprefix p' = case pstatus p' of Unmarked -> "" s -> T.pack (show s) <> " " -- currently prices are considered part of the amount string when right-aligning amounts -- Since we will usually be calling this function with the knot tied between -- amtwidth and thisamtwidth, make sure thisamtwidth does not depend on -- amtwidth at all. shownAmounts | elideamount = [mempty] | otherwise = showMixedAmountLinesB noColour{displayOneLine=onelineamounts} $ pamount p thisamtwidth = maximumBound 0 $ map wbWidth shownAmounts -- when there is a balance assertion, show it only on the last posting line shownAmountsAssertions = zip shownAmounts shownAssertions where shownAssertions = replicate (length shownAmounts - 1) mempty ++ [assertion] where assertion = maybe mempty ((WideBuilder (TB.singleton ' ') 1 <>).showBalanceAssertion) $ pbalanceassertion p -- pad to the maximum account name width, plus 2 to leave room for status flags, to keep amounts aligned statusandaccount = lineIndent . fitText (Just $ 2 + acctwidth) Nothing False True $ pstatusandacct p thisacctwidth = realLength $ pacctstr p (samelinecomment, newlinecomments) = case renderCommentLines (pcomment p) of [] -> ("",[]) c:cs -> (c,cs) -- | Show an account name, clipped to the given width if any, and -- appropriately bracketed/parenthesised for the given posting type. showAccountName :: Maybe Int -> PostingType -> AccountName -> Text showAccountName w = fmt where fmt RegularPosting = maybe id T.take w fmt VirtualPosting = wrap "(" ")" . maybe id (T.takeEnd . subtract 2) w fmt BalancedVirtualPosting = wrap "[" "]" . maybe id (T.takeEnd . subtract 2) w -- | Render a transaction or posting's comment as indented, semicolon-prefixed comment lines. 
-- The first line (unless empty) will have leading space, subsequent lines will have a larger indent. renderCommentLines :: Text -> [Text] renderCommentLines t = case T.lines t of [] -> [] [l] -> [commentSpace $ comment l] -- single-line comment ("":ls) -> "" : map (lineIndent . comment) ls -- multi-line comment with empty first line (l:ls) -> commentSpace (comment l) : map (lineIndent . comment) ls where comment = ("; "<>) -- | Prepend a suitable indent for a posting (or transaction/posting comment) line. lineIndent :: Text -> Text lineIndent = (" "<>) -- | Prepend the space required before a same-line comment. commentSpace :: Text -> Text commentSpace = (" "<>) isReal :: Posting -> Bool isReal p = ptype p == RegularPosting isVirtual :: Posting -> Bool isVirtual p = ptype p == VirtualPosting isBalancedVirtual :: Posting -> Bool isBalancedVirtual p = ptype p == BalancedVirtualPosting hasAmount :: Posting -> Bool hasAmount = not . isMissingMixedAmount . pamount hasBalanceAssignment :: Posting -> Bool hasBalanceAssignment p = not (hasAmount p) && isJust (pbalanceassertion p) -- | Sorted unique account names referenced by these postings. accountNamesFromPostings :: [Posting] -> [AccountName] accountNamesFromPostings = S.toList . S.fromList . map paccount -- | Sum all amounts from a list of postings. sumPostings :: [Posting] -> MixedAmount sumPostings = foldl' (\amt p -> maPlus amt $ pamount p) nullmixedamt -- | Strip all prices from a Posting. postingStripPrices :: Posting -> Posting postingStripPrices = postingTransformAmount mixedAmountStripPrices -- | Get a posting's (primary) date - it's own primary date if specified, -- otherwise the parent transaction's primary date, or the null date if -- there is no parent transaction. postingDate :: Posting -> Day postingDate p = fromMaybe nulldate $ asum dates where dates = [ pdate p, tdate <$> ptransaction p ] -- | Get a posting's secondary (secondary) date, which is the first of: -- posting's secondary date, transaction's secondary date, posting's -- primary date, transaction's primary date, or the null date if there is -- no parent transaction. postingDate2 :: Posting -> Day postingDate2 p = fromMaybe nulldate $ asum dates where dates = [ pdate2 p , tdate2 =<< ptransaction p , pdate p , tdate <$> ptransaction p ] -- | Get a posting's primary or secondary date, as specified. postingDateOrDate2 :: WhichDate -> Posting -> Day postingDateOrDate2 PrimaryDate = postingDate postingDateOrDate2 SecondaryDate = postingDate2 -- | Get a posting's status. This is cleared or pending if those are -- explicitly set on the posting, otherwise the status of its parent -- transaction, or unmarked if there is no parent transaction. (Note -- the ambiguity, unmarked can mean "posting and transaction are both -- unmarked" or "posting is unmarked and don't know about the transaction". postingStatus :: Posting -> Status postingStatus Posting{pstatus=s, ptransaction=mt} = case s of Unmarked -> maybe Unmarked tstatus mt _ -> s -- | Tags for this posting including any inherited from its parent transaction. postingAllTags :: Posting -> [Tag] postingAllTags p = ptags p ++ maybe [] ttags (ptransaction p) -- | Tags for this transaction including any from its postings. transactionAllTags :: Transaction -> [Tag] transactionAllTags t = ttags t ++ concatMap ptags (tpostings t) -- Get the other postings from this posting's transaction. 
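-- For example (a hedged sketch; 'transaction', 'post' and 'usd' are defined in
-- other Hledger.Data modules, and 'transaction' ties the knot so each posting
-- refers back to its parent transaction):
--
-- @
-- t = transaction (fromGregorian 2023 1 2) [post "a" (usd 1), post "b" (usd (-1))]
-- relatedPostings (head (tpostings t))  -- yields only the posting to "b"
-- @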
relatedPostings :: Posting -> [Posting] relatedPostings p@Posting{ptransaction=Just t} = filter (/= p) $ tpostings t relatedPostings _ = [] -- | Does this posting fall within the given date span ? isPostingInDateSpan :: DateSpan -> Posting -> Bool isPostingInDateSpan = isPostingInDateSpan' PrimaryDate -- --date2-sensitive version, separate for now to avoid disturbing multiBalanceReport. isPostingInDateSpan' :: WhichDate -> DateSpan -> Posting -> Bool isPostingInDateSpan' PrimaryDate s = spanContainsDate s . postingDate isPostingInDateSpan' SecondaryDate s = spanContainsDate s . postingDate2 isEmptyPosting :: Posting -> Bool isEmptyPosting = mixedAmountLooksZero . pamount -- | Apply some account aliases to the posting's account name, as described by accountNameApplyAliases. -- This can fail due to a bad replacement pattern in a regular expression alias. postingApplyAliases :: [AccountAlias] -> Posting -> Either RegexError Posting postingApplyAliases aliases p@Posting{paccount} = case accountNameApplyAliases aliases paccount of Right a -> Right p{paccount=a} Left e -> Left err where err = "problem while applying account aliases:\n" ++ pshow aliases ++ "\n to account name: "++T.unpack paccount++"\n "++e -- | Choose and apply a consistent display style to the posting -- amounts in each commodity (see journalCommodityStyles). postingApplyCommodityStyles :: M.Map CommoditySymbol AmountStyle -> Posting -> Posting postingApplyCommodityStyles styles p = p{pamount=styleMixedAmount styles $ pamount p ,pbalanceassertion=fixbalanceassertion <$> pbalanceassertion p} where fixbalanceassertion ba = ba{baamount=styleAmountExceptPrecision styles $ baamount ba} -- | Add tags to a posting, discarding any for which the posting already has a value. postingAddTags :: Posting -> [Tag] -> Posting postingAddTags p@Posting{ptags} tags = p{ptags=ptags `union` tags} -- | Apply a specified valuation to this posting's amount, using the -- provided price oracle, commodity styles, and reference dates. -- See amountApplyValuation. postingApplyValuation :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Day -> Day -> ValuationType -> Posting -> Posting postingApplyValuation priceoracle styles periodlast today v p = postingTransformAmount (mixedAmountApplyValuation priceoracle styles periodlast today (postingDate p) v) p -- | Maybe convert this 'Posting's amount to cost, and apply apply appropriate -- amount styles. postingToCost :: M.Map CommoditySymbol AmountStyle -> ConversionOp -> Posting -> Maybe Posting postingToCost _ NoConversionOp p = Just p postingToCost styles ToCost p -- If this is a conversion posting with a matched transaction price posting, ignore it | "_conversion-matched" `elem` map fst (ptags p) && noCost = Nothing | otherwise = Just $ postingTransformAmount (styleMixedAmount styles . mixedAmountCost) p where noCost = (not . any (isJust . aprice) . amountsRaw) $ pamount p -- | Generate inferred equity postings from a 'Posting' using transaction prices. -- Make sure not to generate equity postings when there are already matched -- conversion postings. 
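-- For example, applied with the equity account "equity:conversion" to a posting
-- of €100 @@ $120, the result is the original posting (tagged as price-matched)
-- followed by two conversion postings, roughly of this shape (an illustrative
-- sketch, not exact rendered output):
--
-- @
-- equity:conversion:$-€:€   €-100
-- equity:conversion:$-€:$    $120
-- @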
postingAddInferredEquityPostings :: Bool -> Text -> Posting -> [Posting] postingAddInferredEquityPostings verbosetags equityAcct p | "_price-matched" `elem` map fst (ptags p) = [p] | otherwise = taggedPosting : concatMap conversionPostings priceAmounts where taggedPosting | null priceAmounts = p | otherwise = p{ ptags = ("_price-matched","") : ptags p } conversionPostings amt = case aprice amt of Nothing -> [] Just _ -> [ cp{ paccount = accountPrefix <> amtCommodity , pamount = mixedAmount . negate $ amountStripPrices amt } , cp{ paccount = accountPrefix <> costCommodity , pamount = mixedAmount cost } ] where cost = amountCost amt amtCommodity = commodity amt costCommodity = commodity cost cp = p{ pcomment = pcomment p & (if verbosetags then (`commentAddTag` ("generated-posting","conversion")) else id) , ptags = ("_conversion-matched","") : -- implementation-specific internal tag, not for users ("_generated-posting","conversion") : (if verbosetags then [("generated-posting", "conversion")] else []) , pbalanceassertion = Nothing , poriginal = Nothing } accountPrefix = mconcat [ equityAcct, ":", T.intercalate "-" $ sort [amtCommodity, costCommodity], ":"] -- Take the commodity of an amount and collapse consecutive spaces to a single space commodity = T.unwords . filter (not . T.null) . T.words . acommodity priceAmounts = filter (isJust . aprice) . amountsRaw $ pamount p -- | Make a market price equivalent to this posting's amount's unit -- price, if any. postingPriceDirectivesFromCost :: Posting -> [PriceDirective] postingPriceDirectivesFromCost p@Posting{pamount} = mapMaybe (amountPriceDirectiveFromCost $ postingDate p) $ amountsRaw pamount -- | Apply a transform function to this posting's amount. postingTransformAmount :: (MixedAmount -> MixedAmount) -> Posting -> Posting postingTransformAmount f p@Posting{pamount=a} = p{pamount=f a} -- | Join two parts of a comment, eg a tag and another tag, or a tag -- and a non-tag, on a single line. Interpolates a comma and space -- unless one of the parts is empty. commentJoin :: Text -> Text -> Text commentJoin c1 c2 | T.null c1 = c2 | T.null c2 = c1 | otherwise = c1 <> ", " <> c2 -- | Add a tag to a comment, comma-separated from any prior content. -- A space is inserted following the colon, before the value. commentAddTag :: Text -> Tag -> Text commentAddTag c (t,v) | T.null c' = tag | otherwise = c' `commentJoin` tag where c' = T.stripEnd c tag = t <> ": " <> v -- | Add a tag on its own line to a comment, preserving any prior content. -- A space is inserted following the colon, before the value. 
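-- For example, @commentAddTagNextLine "hello" ("date", "2023-01-01")@ is
-- @"hello\ndate: 2023-01-01"@; the newline is added only when the comment
-- does not already end with one.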
commentAddTagNextLine :: Text -> Tag -> Text commentAddTagNextLine cmt (t,v) = cmt <> (if "\n" `T.isSuffixOf` cmt then "" else "\n") <> t <> ": " <> v -- tests tests_Posting = testGroup "Posting" [ testCase "accountNamePostingType" $ do accountNamePostingType "a" @?= RegularPosting accountNamePostingType "(a)" @?= VirtualPosting accountNamePostingType "[a]" @?= BalancedVirtualPosting ,testCase "accountNameWithoutPostingType" $ do accountNameWithoutPostingType "(a)" @?= "a" ,testCase "accountNameWithPostingType" $ do accountNameWithPostingType VirtualPosting "[a]" @?= "(a)" ,testCase "joinAccountNames" $ do "a" `joinAccountNames` "b:c" @?= "a:b:c" "a" `joinAccountNames` "(b:c)" @?= "(a:b:c)" "[a]" `joinAccountNames` "(b:c)" @?= "[a:b:c]" "" `joinAccountNames` "a" @?= "a" ,testCase "concatAccountNames" $ do concatAccountNames [] @?= "" concatAccountNames ["a","(b)","[c:d]"] @?= "(a:b:c:d)" ,testCase "commentAddTag" $ do commentAddTag "" ("a","") @?= "a: " commentAddTag "[1/2]" ("a","") @?= "[1/2], a: " ,testCase "commentAddTagNextLine" $ do commentAddTagNextLine "" ("a","") @?= "\na: " commentAddTagNextLine "[1/2]" ("a","") @?= "[1/2]\na: " ] hledger-lib-1.30/Hledger/Data/RawOptions.hs0000644000000000000000000001162714434445206016702 0ustar0000000000000000{-| hledger's cmdargs modes parse command-line arguments to an intermediate format, RawOpts (an association list), rather than a fixed ADT like CliOpts. This allows the modes and flags to be reused more easily by hledger commands/scripts in this and other packages. -} module Hledger.Data.RawOptions ( RawOpts, setopt, setboolopt, unsetboolopt, appendopts, boolopt, toggleopt, choiceopt, collectopts, stringopt, maybestringopt, listofstringopt, intopt, posintopt, maybeintopt, maybeposintopt, maybecharopt, overRawOpts ) where import Data.Maybe (fromMaybe, isJust, mapMaybe) import Data.Default (Default(..)) import Safe (headMay, lastMay, readDef) import Hledger.Utils -- | The result of running cmdargs: an association list of option names to string values. newtype RawOpts = RawOpts { unRawOpts :: [(String,String)] } deriving (Show) instance Default RawOpts where def = RawOpts [] overRawOpts f = RawOpts . f . unRawOpts setopt :: String -> String -> RawOpts -> RawOpts setopt name val = overRawOpts (++ [(name, val)]) setboolopt :: String -> RawOpts -> RawOpts setboolopt name = overRawOpts (++ [(name,"")]) unsetboolopt :: String -> RawOpts -> RawOpts unsetboolopt name = overRawOpts (filter ((/=name).fst)) appendopts :: [(String,String)] -> RawOpts -> RawOpts appendopts new = overRawOpts (++new) -- | Is the named flag present ? boolopt :: String -> RawOpts -> Bool boolopt name = isJust . lookup name . unRawOpts -- | Like boolopt, except if the flag is repeated on the command line it toggles the value. -- An even number of repetitions is equivalent to none. toggleopt :: String -> RawOpts -> Bool toggleopt name rawopts = odd $ length [ n | (n,_) <- unRawOpts rawopts, n==name] -- | From a list of RawOpts, get the last one (ie the right-most on the command line) -- for which the given predicate returns a Just value. -- Useful for exclusive choice flags like --daily|--weekly|--quarterly... 
-- -- >>> import Safe (readMay) -- >>> choiceopt Just (RawOpts [("a",""), ("b",""), ("c","")]) -- Just "c" -- >>> choiceopt (const Nothing) (RawOpts [("a","")]) -- Nothing -- >>> choiceopt readMay (RawOpts [("LT",""),("EQ",""),("Neither","")]) :: Maybe Ordering -- Just EQ choiceopt :: (String -> Maybe a) -- ^ "parser" that returns 'Just' value for valid choice -> RawOpts -- ^ actual options where to look for flag -> Maybe a -- ^ exclusive choice among those returned as 'Just' from "parser" choiceopt f = lastMay . collectopts (f . fst) -- | Collects processed and filtered list of options preserving their order -- -- >>> collectopts (const Nothing) (RawOpts [("x","")]) -- [] -- >>> collectopts Just (RawOpts [("a",""),("b","")]) -- [("a",""),("b","")] collectopts :: ((String, String) -> Maybe a) -> RawOpts -> [a] collectopts f = mapMaybe f . unRawOpts maybestringopt :: String -> RawOpts -> Maybe String maybestringopt name = lookup name . reverse . unRawOpts stringopt :: String -> RawOpts -> String stringopt name = fromMaybe "" . maybestringopt name maybecharopt :: String -> RawOpts -> Maybe Char maybecharopt name (RawOpts rawopts) = lookup name rawopts >>= headMay listofstringopt :: String -> RawOpts -> [String] listofstringopt name (RawOpts rawopts) = [v | (k,v) <- rawopts, k==name] -- | Reads the named option's Int argument, if it is present. -- An argument that is too small or too large will raise an error. maybeintopt :: String -> RawOpts -> Maybe Int maybeintopt = maybeclippedintopt minBound maxBound -- | Reads the named option's natural-number argument, if it is present. -- An argument that is negative or too large will raise an error. maybeposintopt :: String -> RawOpts -> Maybe Int maybeposintopt = maybeclippedintopt 0 maxBound -- | Reads the named option's Int argument. If not present it will -- return 0. An argument that is too small or too large will raise an error. intopt :: String -> RawOpts -> Int intopt name = fromMaybe 0 . maybeintopt name -- | Reads the named option's natural-number argument. If not present it will -- return 0. An argument that is negative or too large will raise an error. posintopt :: String -> RawOpts -> Int posintopt name = fromMaybe 0 . maybeposintopt name -- | Reads the named option's Int argument, if it is present. An argument -- that does not fit within the given bounds will raise an error. maybeclippedintopt :: Int -> Int -> String -> RawOpts -> Maybe Int maybeclippedintopt minVal maxVal name = fmap (intOrError . readOrError) . maybestringopt name where readOrError s = readDef (usageError $ "could not parse " ++ name ++ " number: " ++ s) s intOrError n | n >= toInteger minVal && n <= toInteger maxVal = fromInteger n | otherwise = usageError $ "argument to " ++ name ++ " must lie in the range " ++ show minVal ++ " to " ++ show maxVal ++ ", but is " ++ show n hledger-lib-1.30/Hledger/Data/Timeclock.hs0000644000000000000000000001661114434445206016505 0ustar0000000000000000{-| A 'TimeclockEntry' is a clock-in, clock-out, or other directive in a timeclock file (see timeclock.el or the command-line version). These can be converted to 'Transactions' and queried like a ledger. 
-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Timeclock ( timeclockEntriesToTransactions ,tests_Timeclock ) where import Data.Maybe (fromMaybe) import qualified Data.Text as T import Data.Time.Calendar (addDays) import Data.Time.Clock (addUTCTime, getCurrentTime) import Data.Time.Format (defaultTimeLocale, formatTime, parseTimeM) import Data.Time.LocalTime (LocalTime(..), TimeOfDay(..), getCurrentTimeZone, localTimeToUTC, midnight, utc, utcToLocalTime) import Text.Printf (printf) import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Amount import Hledger.Data.Posting instance Show TimeclockEntry where show t = printf "%s %s %s %s" (show $ tlcode t) (show $ tldatetime t) (tlaccount t) (tldescription t) instance Show TimeclockCode where show SetBalance = "b" show SetRequiredHours = "h" show In = "i" show Out = "o" show FinalOut = "O" instance Read TimeclockCode where readsPrec _ ('b' : xs) = [(SetBalance, xs)] readsPrec _ ('h' : xs) = [(SetRequiredHours, xs)] readsPrec _ ('i' : xs) = [(In, xs)] readsPrec _ ('o' : xs) = [(Out, xs)] readsPrec _ ('O' : xs) = [(FinalOut, xs)] readsPrec _ _ = [] -- | Convert time log entries to journal transactions. When there is no -- clockout, add one with the provided current time. Sessions crossing -- midnight are split into days to give accurate per-day totals. timeclockEntriesToTransactions :: LocalTime -> [TimeclockEntry] -> [Transaction] timeclockEntriesToTransactions _ [] = [] timeclockEntriesToTransactions now [i] | tlcode i /= In = errorExpectedCodeButGot In i | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now [i',o] | otherwise = [entryFromTimeclockInOut i o] where o = TimeclockEntry (tlsourcepos i) Out end "" "" "" [] end = if itime > now then itime else now (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} timeclockEntriesToTransactions now (i:o:rest) | tlcode i /= In = errorExpectedCodeButGot In i | tlcode o /= Out = errorExpectedCodeButGot Out o | odate > idate = entryFromTimeclockInOut i o' : timeclockEntriesToTransactions now (i':o:rest) | otherwise = entryFromTimeclockInOut i o : timeclockEntriesToTransactions now rest where (itime,otime) = (tldatetime i,tldatetime o) (idate,odate) = (localDay itime,localDay otime) o' = o{tldatetime=itime{localDay=idate, localTimeOfDay=TimeOfDay 23 59 59}} i' = i{tldatetime=itime{localDay=addDays 1 idate, localTimeOfDay=midnight}} {- HLINT ignore timeclockEntriesToTransactions -} errorExpectedCodeButGot :: TimeclockCode -> TimeclockEntry -> a errorExpectedCodeButGot expected actual = error' $ printf ("%s:\n%s\n%s\n\nExpected a timeclock %s entry but got %s.\n" ++"Only one session may be clocked in at a time.\n" ++"Please alternate i and o, beginning with i.") (sourcePosPretty $ tlsourcepos actual) (l ++ " | " ++ show actual) (replicate (length l) ' ' ++ " |" ++ replicate c ' ' ++ "^") (show expected) (show $ tlcode actual) where l = show $ unPos $ sourceLine $ tlsourcepos actual c = unPos $ sourceColumn $ tlsourcepos actual -- | Convert a timeclock clockin and clockout entry to an equivalent journal -- transaction, representing the time expenditure. Note this entry is not balanced, -- since we omit the \"assets:time\" transaction for simpler output. 
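-- For example, a clock-in/clock-out pair like
--
-- @
-- i 2023/01/02 09:00:00 projects:a
-- o 2023/01/02 17:00:00
-- @
--
-- becomes, roughly (a hedged sketch of the printed form; exact alignment is
-- decided by showTransaction and the amount styles):
--
-- @
-- 2023-01-02 * 09:00-17:00
--     (projects:a)    8.00h
-- @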
entryFromTimeclockInOut :: TimeclockEntry -> TimeclockEntry -> Transaction entryFromTimeclockInOut i o | otime >= itime = t | otherwise = -- Clockout time earlier than clockin is an error. -- (Clockin earlier than preceding clockin/clockout is allowed.) error' $ printf ("%s:\n%s\nThis clockout time (%s) is earlier than the previous clockin.\n" ++"Please adjust it to be later than %s.") (sourcePosPretty $ tlsourcepos o) (unlines [ replicate (length l) ' '++ " | " ++ show i, l ++ " | " ++ show o, (replicate (length l) ' ' ++ " |" ++ replicate c ' ' ++ replicate 19 '^') ]) (show $ tldatetime o) (show $ tldatetime i) where l = show $ unPos $ sourceLine $ tlsourcepos o c = (unPos $ sourceColumn $ tlsourcepos o) + 2 t = Transaction { tindex = 0, tsourcepos = (tlsourcepos i, tlsourcepos i), tdate = idate, tdate2 = Nothing, tstatus = Cleared, tcode = "", tdescription = desc, tcomment = tlcomment i, ttags = tltags i, tpostings = ps, tprecedingcomment="" } itime = tldatetime i otime = tldatetime o itod = localTimeOfDay itime otod = localTimeOfDay otime idate = localDay itime desc | T.null (tldescription i) = T.pack $ showtime itod ++ "-" ++ showtime otod | otherwise = tldescription i showtime = take 5 . show hours = elapsedSeconds (toutc otime) (toutc itime) / 3600 where toutc = localTimeToUTC utc acctname = tlaccount i -- Generate an hours amount. Unusually, we also round the internal Decimal value, -- since otherwise it will often have large recurring decimal parts which (since 1.21) -- print would display all 255 digits of. timeclock amounts have one second resolution, -- so two decimal places is precise enough (#1527). amt = mixedAmount $ setAmountInternalPrecision 2 $ hrs hours ps = [posting{paccount=acctname, pamount=amt, ptype=VirtualPosting, ptransaction=Just t}] -- tests tests_Timeclock = testGroup "Timeclock" [ testCaseSteps "timeclockEntriesToTransactions tests" $ \step -> do step "gathering data" today <- getCurrentDay now' <- getCurrentTime tz <- getCurrentTimeZone let now = utcToLocalTime tz now' nowstr = showtime now yesterday = prevday today clockin = TimeclockEntry (initialPos "") In mktime d = LocalTime d . fromMaybe midnight . parseTimeM True defaultTimeLocale "%H:%M:%S" showtime = formatTime defaultTimeLocale "%H:%M" txndescs = map (T.unpack . tdescription) . timeclockEntriesToTransactions now future = utcToLocalTime tz $ addUTCTime 100 now' futurestr = showtime future step "started yesterday, split session at midnight" txndescs [clockin (mktime yesterday "23:00:00") "" "" "" []] @?= ["23:00-23:59","00:00-"++nowstr] step "split multi-day sessions at each midnight" txndescs [clockin (mktime (addDays (-2) today) "23:00:00") "" "" "" []] @?= ["23:00-23:59","00:00-23:59","00:00-"++nowstr] step "auto-clock-out if needed" txndescs [clockin (mktime today "00:00:00") "" "" "" []] @?= ["00:00-"++nowstr] step "use the clockin time for auto-clockout if it's in the future" txndescs [clockin future "" "" "" []] @?= [printf "%s-%s" futurestr futurestr] ] hledger-lib-1.30/Hledger/Data/Transaction.hs0000644000000000000000000006406414434445206017065 0ustar0000000000000000{-| A 'Transaction' represents a movement of some commodity(ies) between two or more accounts. It consists of multiple account 'Posting's which balance to zero, a date, and optional extras like description, cleared status, and tags. 
-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} module Hledger.Data.Transaction ( -- * Transaction nulltransaction , transaction , txnTieKnot , txnUntieKnot -- * operations , hasRealPostings , realPostings , assignmentPostings , virtualPostings , balancedVirtualPostings , transactionsPostings , transactionTransformPostings , transactionApplyValuation , transactionToCost , transactionAddInferredEquityPostings , transactionInferCostsFromEquity , transactionApplyAliases , transactionMapPostings , transactionMapPostingAmounts , partitionAndCheckConversionPostings -- nonzerobalanceerror -- * date operations , transactionDate2 , transactionDateOrDate2 -- * transaction description parts , transactionPayee , transactionNote -- payeeAndNoteFromDescription -- * rendering , showTransaction , showTransactionOneLineAmounts , showTransactionLineFirstPart , transactionFile -- * transaction errors , annotateErrorWithTransaction -- * tests , tests_Transaction ) where import Control.Monad.Trans.State (StateT(..), evalStateT) import Data.Bifunctor (first) import Data.Foldable (foldrM) import Data.Maybe (fromMaybe, isJust, mapMaybe) import Data.Semigroup (Endo(..)) import Data.Text (Text) import qualified Data.Map as M import qualified Data.Text as T import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Data.Time.Calendar (Day, fromGregorian) import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Amount import Hledger.Data.Valuation nulltransaction :: Transaction nulltransaction = Transaction { tindex=0, tsourcepos=nullsourcepos, tdate=nulldate, tdate2=Nothing, tstatus=Unmarked, tcode="", tdescription="", tcomment="", ttags=[], tpostings=[], tprecedingcomment="" } -- | Make a simple transaction with the given date and postings. transaction :: Day -> [Posting] -> Transaction transaction day ps = txnTieKnot $ nulltransaction{tdate=day, tpostings=ps} transactionPayee :: Transaction -> Text transactionPayee = fst . payeeAndNoteFromDescription . tdescription transactionNote :: Transaction -> Text transactionNote = snd . payeeAndNoteFromDescription . tdescription -- | Parse a transaction's description into payee and note (aka narration) fields, -- assuming a convention of separating these with | (like Beancount). -- Ie, everything up to the first | is the payee, everything after it is the note. -- When there's no |, payee == note == description. payeeAndNoteFromDescription :: Text -> (Text,Text) payeeAndNoteFromDescription t | T.null n = (t, t) | otherwise = (T.strip p, T.strip $ T.drop 1 n) where (p, n) = T.span (/= '|') t {-| Render a journal transaction as text similar to the style of Ledger's print command. Adapted from Ledger 2.x and 3.x standard format: @ yyyy-mm-dd[ *][ CODE] description......... [ ; comment...............] account name 1..................... ...$amount1[ ; comment...............] account name 2..................... ..$-amount1[ ; comment...............] pcodewidth = no limit -- 10 -- mimicking ledger layout. pdescwidth = no limit -- 20 -- I don't remember what these mean, pacctwidth = 35 minimum, no maximum -- they were important at the time. pamtwidth = 11 pcommentwidth = no limit -- 22 @ The output will be parseable journal syntax. To facilitate this, postings with explicit multi-commodity amounts are displayed as multiple similar postings, one per commodity. (Normally does not happen with this function). 
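For example, a simple balanced transaction renders roughly like this
(a sketch; the exact column alignment is computed from the account name
and amount widths):

@
2007-01-28 coopportunity
    expenses:food:groceries          $47.18
    assets:checking                 $-47.18
@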
-} showTransaction :: Transaction -> Text showTransaction = TL.toStrict . TB.toLazyText . showTransactionHelper False -- | Like showTransaction, but explicit multi-commodity amounts -- are shown on one line, comma-separated. In this case the output will -- not be parseable journal syntax. showTransactionOneLineAmounts :: Transaction -> Text showTransactionOneLineAmounts = TL.toStrict . TB.toLazyText . showTransactionHelper True -- | Helper for showTransaction*. showTransactionHelper :: Bool -> Transaction -> TB.Builder showTransactionHelper onelineamounts t = TB.fromText descriptionline <> newline <> foldMap ((<> newline) . TB.fromText) newlinecomments <> foldMap ((<> newline) . TB.fromText) (postingsAsLines onelineamounts $ tpostings t) <> newline where descriptionline = T.stripEnd $ showTransactionLineFirstPart t <> T.concat [desc, samelinecomment] desc = if T.null d then "" else " " <> d where d = tdescription t (samelinecomment, newlinecomments) = case renderCommentLines (tcomment t) of [] -> ("",[]) c:cs -> (c,cs) newline = TB.singleton '\n' -- Useful when rendering error messages. showTransactionLineFirstPart t = T.concat [date, status, code] where date = showDate (tdate t) <> maybe "" (("="<>) . showDate) (tdate2 t) status | tstatus t == Cleared = " *" | tstatus t == Pending = " !" | otherwise = "" code = if T.null (tcode t) then "" else wrap " (" ")" $ tcode t hasRealPostings :: Transaction -> Bool hasRealPostings = not . null . realPostings realPostings :: Transaction -> [Posting] realPostings = filter isReal . tpostings assignmentPostings :: Transaction -> [Posting] assignmentPostings = filter hasBalanceAssignment . tpostings virtualPostings :: Transaction -> [Posting] virtualPostings = filter isVirtual . tpostings balancedVirtualPostings :: Transaction -> [Posting] balancedVirtualPostings = filter isBalancedVirtual . tpostings transactionsPostings :: [Transaction] -> [Posting] transactionsPostings = concatMap tpostings -- Get a transaction's secondary date, or the primary date if there is none. transactionDate2 :: Transaction -> Day transactionDate2 t = fromMaybe (tdate t) $ tdate2 t -- Get a transaction's primary or secondary date, as specified. transactionDateOrDate2 :: WhichDate -> Transaction -> Day transactionDateOrDate2 PrimaryDate = tdate transactionDateOrDate2 SecondaryDate = transactionDate2 -- | Ensure a transaction's postings refer back to it, so that eg -- relatedPostings works right. txnTieKnot :: Transaction -> Transaction txnTieKnot t@Transaction{tpostings=ps} = t' where t' = t{tpostings=map (postingSetTransaction t') ps} -- | Ensure a transaction's postings do not refer back to it, so that eg -- recursiveSize and GHCI's :sprint work right. txnUntieKnot :: Transaction -> Transaction txnUntieKnot t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{ptransaction=Nothing}) ps} -- | Set a posting's parent transaction. postingSetTransaction :: Transaction -> Posting -> Posting postingSetTransaction t p = p{ptransaction=Just t} -- | Apply a transform function to this transaction's amounts. transactionTransformPostings :: (Posting -> Posting) -> Transaction -> Transaction transactionTransformPostings f t@Transaction{tpostings=ps} = t{tpostings=map f ps} -- | Apply a specified valuation to this transaction's amounts, using -- the provided price oracle, commodity styles, and reference dates. -- See amountApplyValuation. 
transactionApplyValuation :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Day -> Day -> ValuationType -> Transaction -> Transaction transactionApplyValuation priceoracle styles periodlast today v = transactionTransformPostings (postingApplyValuation priceoracle styles periodlast today v) -- | Maybe convert this 'Transaction's amounts to cost and apply the -- appropriate amount styles. transactionToCost :: M.Map CommoditySymbol AmountStyle -> ConversionOp -> Transaction -> Transaction transactionToCost styles cost t = t{tpostings = mapMaybe (postingToCost styles cost) $ tpostings t} -- | Add inferred equity postings to a 'Transaction' using transaction prices. transactionAddInferredEquityPostings :: Bool -> AccountName -> Transaction -> Transaction transactionAddInferredEquityPostings verbosetags equityAcct t = t{tpostings=concatMap (postingAddInferredEquityPostings verbosetags equityAcct) $ tpostings t} type IdxPosting = (Int, Posting) -- WARNING: twisty code ahead -- | Add costs inferred from equity postings in this transaction. -- For every adjacent pair of conversion postings, it will first search the postings -- with costs to see if any match. If so, it will tag these as matched. -- If no postings with costs match, it will then search the postings without costs, -- and will match the first such posting which matches one of the conversion amounts. -- If it finds a match, it will add a cost and then tag it. -- If the first argument is true, do a dry run instead: identify and tag -- the costful and conversion postings, but don't add costs. transactionInferCostsFromEquity :: Bool -> M.Map AccountName AccountType -> Transaction -> Either String Transaction transactionInferCostsFromEquity dryrun acctTypes t = first (annotateErrorWithTransaction t . T.unpack) $ do (conversionPairs, stateps) <- partitionAndCheckConversionPostings False acctTypes npostings f <- transformIndexedPostingsF (addCostsToPostings dryrun) conversionPairs stateps return t{tpostings = map (snd . f) npostings} where -- Include indices for postings npostings = zip [0..] $ tpostings t transformIndexedPostingsF f = evalStateT . fmap (appEndo . foldMap Endo) . traverse f -- Given a pair of indexed conversion postings, and a state consisting of lists of -- costful and costless non-conversion postings, create a function which adds a conversion cost -- to the posting which matches the conversion postings if necessary, -- and tags the conversion and matched postings. Then update the state by removing the -- matched postings. If there are no matching postings or too much ambiguity, -- return an error string annotated with the conversion postings. -- If the first argument is true, do a dry run instead: identify and tag -- the costful and conversion postings, but don't add costs. 
addCostsToPostings :: Bool -> (IdxPosting, IdxPosting) -> StateT ([IdxPosting], [IdxPosting]) (Either Text) (IdxPosting -> IdxPosting) addCostsToPostings dryrun' ((n1, cp1), (n2, cp2)) = StateT $ \(costps, otherps) -> do -- Get the two conversion posting amounts, if possible ca1 <- conversionPostingAmountNoCost cp1 ca2 <- conversionPostingAmountNoCost cp2 let -- The function to add costs and tag postings in the indexed list of postings transformPostingF np costp (n,p) = (n, if | n == np -> costp `postingAddTags` [("_price-matched","")] | n == n1 || n == n2 -> p `postingAddTags` [("_conversion-matched","")] | otherwise -> p) -- All costful postings which match the conversion posting pair matchingCostPs = mapMaybe (mapM $ costfulPostingIfMatchesBothAmounts ca1 ca2) costps -- All other postings which match at least one of the conversion posting pair. -- Add a corresponding cost to these postings, unless in dry run mode. matchingOtherPs | dryrun' = [(n,(p, a)) | (n,p) <- otherps, let Just a = postingSingleAmount p] | otherwise = mapMaybe (mapM $ addCostIfMatchesOneAmount ca1 ca2) otherps -- Annotate any errors with the conversion posting pair first (annotateWithPostings [cp1, cp2]) $ if -- If a single costful posting matches the conversion postings, -- delete it from the list of costful postings in the state, delete the -- first matching costless posting from the list of costless postings -- in the state, and return the transformation function with the new state. | [(np, (costp, _))] <- matchingCostPs , Just newcostps <- deleteIdx np costps -> Right (transformPostingF np costp, (if dryrun' then costps else newcostps, otherps)) -- If no costful postings match the conversion postings, but some -- of the costless postings match, check that the first such posting has a -- different amount from all the others, and if so add a cost to it, -- then delete it from the list of costless postings in the state, -- and return the transformation function with the new state. | [] <- matchingCostPs , (np, (costp, amt)):nps <- matchingOtherPs , not $ any (amountMatches amt . snd . snd) nps , Just newotherps <- deleteIdx np otherps -> Right (transformPostingF np costp, (costps, if dryrun' then otherps else newotherps)) -- Otherwise it's too ambiguous to make a guess, so return an error. | otherwise -> Left "There is not a unique posting which matches the conversion posting pair:" -- If a posting with cost matches both the conversion amounts, return it along -- with the matching amount which must be present in another non-conversion posting. costfulPostingIfMatchesBothAmounts :: Amount -> Amount -> Posting -> Maybe (Posting, Amount) costfulPostingIfMatchesBothAmounts a1 a2 p = do a@Amount{aprice=Just _} <- postingSingleAmount p if | amountMatches (-a1) a && amountMatches a2 (amountCost a) -> Just (p, -a2) | amountMatches (-a2) a && amountMatches a1 (amountCost a) -> Just (p, -a1) | otherwise -> Nothing -- Add a cost to a posting if it matches (negative) one of the -- supplied conversion amounts, adding the other amount as the cost. addCostIfMatchesOneAmount :: Amount -> Amount -> Posting -> Maybe (Posting, Amount) addCostIfMatchesOneAmount a1 a2 p = do a <- postingSingleAmount p let newp cost = p{pamount = mixedAmount a{aprice = Just $ TotalPrice cost}} if | amountMatches (-a1) a -> Just (newp a2, a2) | amountMatches (-a2) a -> Just (newp a1, a1) | otherwise -> Nothing -- Get the single-commodity costless amount from a conversion posting, or raise an error. 
conversionPostingAmountNoCost p = case postingSingleAmount p of Just a@Amount{aprice=Nothing} -> Right a Just Amount{aprice=Just _} -> Left $ annotateWithPostings [p] "Conversion postings must not have a cost:" Nothing -> Left $ annotateWithPostings [p] "Conversion postings must have a single-commodity amount:" amountMatches a b = acommodity a == acommodity b && aquantity a == aquantity b -- Delete a posting from the indexed list of postings based on either its -- index or its posting amount. -- Note: traversing the whole list to delete a single match is generally not efficient, -- but given that a transaction probably doesn't have more than four postings, it should -- still be more efficient than using a Map or another data structure. Even monster -- transactions with up to 10 postings, which are generally not a good -- idea, are still too small for there to be an advantage. -- XXX shouldn't assume transactions have few postings deleteIdx n = deleteUniqueMatch ((n==) . fst) deleteUniqueMatch p (x:xs) | p x = if any p xs then Nothing else Just xs | otherwise = (x:) <$> deleteUniqueMatch p xs deleteUniqueMatch _ [] = Nothing annotateWithPostings xs str = T.unlines $ str : postingsAsLines False xs -- Using the provided account types map, sort the given indexed postings -- into three lists of posting numbers (stored in two pairs), like so: -- (conversion postings, (costful postings, other postings)). -- A true first argument activates its secondary function: check that all -- conversion postings occur in adjacent pairs, otherwise return an error. partitionAndCheckConversionPostings :: Bool -> M.Map AccountName AccountType -> [IdxPosting] -> Either Text ( [(IdxPosting, IdxPosting)], ([IdxPosting], [IdxPosting]) ) partitionAndCheckConversionPostings check acctTypes = fmap fst . foldrM select (([], ([], [])), Nothing) where select np@(_, p) ((cs, others@(ps, os)), Nothing) | isConversion p = Right ((cs, others), Just np) | hasCost p = Right ((cs, (np:ps, os)), Nothing) | otherwise = Right ((cs, (ps, np:os)), Nothing) select np@(_, p) ((cs, others@(ps,os)), Just lst) | isConversion p = Right (((lst, np):cs, others), Nothing) | check = Left "Conversion postings must occur in adjacent pairs" | otherwise = Right ((cs, (ps, np:os)), Nothing) isConversion p = M.lookup (paccount p) acctTypes == Just Conversion hasCost p = isJust $ aprice =<< postingSingleAmount p -- | Get a posting's amount if it is single-commodity. postingSingleAmount :: Posting -> Maybe Amount postingSingleAmount p = case amountsRaw (pamount p) of [a] -> Just a _ -> Nothing -- | Apply some account aliases to all posting account names in the transaction, as described by accountNameApplyAliases. -- This can fail due to a bad replacement pattern in a regular expression alias. transactionApplyAliases :: [AccountAlias] -> Transaction -> Either RegexError Transaction transactionApplyAliases aliases t = case mapM (postingApplyAliases aliases) $ tpostings t of Right ps -> Right $ txnTieKnot $ t{tpostings=ps} Left err -> Left err -- | Apply a transformation to a transaction's postings. transactionMapPostings :: (Posting -> Posting) -> Transaction -> Transaction transactionMapPostings f t@Transaction{tpostings=ps} = t{tpostings=map f ps} -- | Apply a transformation to a transaction's posting amounts. transactionMapPostingAmounts :: (MixedAmount -> MixedAmount) -> Transaction -> Transaction transactionMapPostingAmounts f = transactionMapPostings (postingTransformAmount f) -- | The file path from which this transaction was parsed. 
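-- For example, a transaction parsed from a file named "2023.journal" gives
-- "2023.journal", while 'nulltransaction' (whose source positions are null)
-- gives the empty string.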
transactionFile :: Transaction -> FilePath transactionFile Transaction{tsourcepos} = sourceName $ fst tsourcepos -- Add transaction information to an error message. annotateErrorWithTransaction :: Transaction -> String -> String annotateErrorWithTransaction t s = unlines [ sourcePosPairPretty $ tsourcepos t, s , T.unpack . T.stripEnd $ showTransaction t ] -- tests tests_Transaction :: TestTree tests_Transaction = testGroup "Transaction" [ testGroup "showPostingLines" [ testCase "null posting" $ showPostingLines nullposting @?= [" 0"] , testCase "non-null posting" $ let p = posting { pstatus = Cleared , paccount = "a" , pamount = mixed [usd 1, hrs 2] , pcomment = "pcomment1\npcomment2\n tag3: val3 \n" , ptype = RegularPosting , ptags = [("ptag1", "val1"), ("ptag2", "val2")] } in showPostingLines p @?= [ " * a $1.00 ; pcomment1" , " ; pcomment2" , " ; tag3: val3 " , " * a 2.00h ; pcomment1" , " ; pcomment2" , " ; tag3: val3 " ] ] , let -- one implicit amount timp = nulltransaction {tpostings = ["a" `post` usd 1, "b" `post` missingamt]} -- explicit amounts, balanced texp = nulltransaction {tpostings = ["a" `post` usd 1, "b" `post` usd (-1)]} -- explicit amount, only one posting texp1 = nulltransaction {tpostings = ["(a)" `post` usd 1]} -- explicit amounts, two commodities, explicit balancing price texp2 = nulltransaction {tpostings = ["a" `post` usd 1, "b" `post` (hrs (-1) `at` usd 1)]} -- explicit amounts, two commodities, implicit balancing price texp2b = nulltransaction {tpostings = ["a" `post` usd 1, "b" `post` hrs (-1)]} -- one missing amount, not the last one t3 = nulltransaction {tpostings = ["a" `post` usd 1, "b" `post` missingamt, "c" `post` usd (-1)]} -- unbalanced amounts when precision is limited (#931) -- t4 = nulltransaction {tpostings = ["a" `post` usd (-0.01), "b" `post` usd (0.005), "c" `post` usd (0.005)]} in testGroup "postingsAsLines" [ testCase "null-transaction" $ postingsAsLines False (tpostings nulltransaction) @?= [] , testCase "implicit-amount" $ postingsAsLines False (tpostings timp) @?= [ " a $1.00" , " b" -- implicit amount remains implicit ] , testCase "explicit-amounts" $ postingsAsLines False (tpostings texp) @?= [ " a $1.00" , " b $-1.00" ] , testCase "one-explicit-amount" $ postingsAsLines False (tpostings texp1) @?= [ " (a) $1.00" ] , testCase "explicit-amounts-two-commodities" $ postingsAsLines False (tpostings texp2) @?= [ " a $1.00" , " b -1.00h @ $1.00" ] , testCase "explicit-amounts-not-explicitly-balanced" $ postingsAsLines False (tpostings texp2b) @?= [ " a $1.00" , " b -1.00h" ] , testCase "implicit-amount-not-last" $ postingsAsLines False (tpostings t3) @?= [" a $1.00", " b", " c $-1.00"] -- , testCase "ensure-visibly-balanced" $ -- in postingsAsLines False (tpostings t4) @?= -- [" a $-0.01", " b $0.005", " c $0.005"] ] , testGroup "showTransaction" [ testCase "null transaction" $ showTransaction nulltransaction @?= "0000-01-01\n\n" , testCase "non-null transaction" $ showTransaction nulltransaction { tdate = fromGregorian 2012 05 14 , tdate2 = Just $ fromGregorian 2012 05 15 , tstatus = Unmarked , tcode = "code" , tdescription = "desc" , tcomment = "tcomment1\ntcomment2\n" , ttags = [("ttag1", "val1")] , tpostings = [ nullposting { pstatus = Cleared , paccount = "a" , pamount = mixed [usd 1, hrs 2] , pcomment = "\npcomment2\n" , ptype = RegularPosting , ptags = [("ptag1", "val1"), ("ptag2", "val2")] } ] } @?= T.unlines [ "2012-05-14=2012-05-15 (code) desc ; tcomment1" , " ; tcomment2" , " * a $1.00" , " ; pcomment2" , " * a 2.00h" , " ; pcomment2" 
, "" ] , testCase "show a balanced transaction" $ (let t = Transaction 0 "" nullsourcepos (fromGregorian 2007 01 28) Nothing Unmarked "" "coopportunity" "" [] [ posting {paccount = "expenses:food:groceries", pamount = mixedAmount (usd 47.18), ptransaction = Just t} , posting {paccount = "assets:checking", pamount = mixedAmount (usd (-47.18)), ptransaction = Just t} ] in showTransaction t) @?= (T.unlines [ "2007-01-28 coopportunity" , " expenses:food:groceries $47.18" , " assets:checking $-47.18" , "" ]) , testCase "show an unbalanced transaction, should not elide" $ (showTransaction (txnTieKnot $ Transaction 0 "" nullsourcepos (fromGregorian 2007 01 28) Nothing Unmarked "" "coopportunity" "" [] [ posting {paccount = "expenses:food:groceries", pamount = mixedAmount (usd 47.18)} , posting {paccount = "assets:checking", pamount = mixedAmount (usd (-47.19))} ])) @?= (T.unlines [ "2007-01-28 coopportunity" , " expenses:food:groceries $47.18" , " assets:checking $-47.19" , "" ]) , testCase "show a transaction with one posting and a missing amount" $ (showTransaction (txnTieKnot $ Transaction 0 "" nullsourcepos (fromGregorian 2007 01 28) Nothing Unmarked "" "coopportunity" "" [] [posting {paccount = "expenses:food:groceries", pamount = missingmixedamt}])) @?= (T.unlines ["2007-01-28 coopportunity", " expenses:food:groceries", ""]) , testCase "show a transaction with a priced commodityless amount" $ (showTransaction (txnTieKnot $ Transaction 0 "" nullsourcepos (fromGregorian 2010 01 01) Nothing Unmarked "" "x" "" [] [ posting {paccount = "a", pamount = mixedAmount $ num 1 `at` (usd 2 `withPrecision` Precision 0)} , posting {paccount = "b", pamount = missingmixedamt} ])) @?= (T.unlines ["2010-01-01 x", " a 1 @ $2", " b", ""]) ] ] hledger-lib-1.30/Hledger/Data/TransactionModifier.hs0000644000000000000000000001731614434445206020542 0ustar0000000000000000{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ViewPatterns #-} {-| A 'TransactionModifier' is a rule that modifies certain 'Transaction's, typically adding automated postings to them. -} module Hledger.Data.TransactionModifier ( modifyTransactions ) where import Prelude hiding (Applicative(..)) import Control.Applicative (Applicative(..), (<|>)) import Data.Function ((&)) import qualified Data.Map as M import Data.Maybe (catMaybes) import qualified Data.Text as T import Data.Time.Calendar (Day) import Hledger.Data.Types import Hledger.Data.Amount import Hledger.Data.Dates import Hledger.Data.Transaction (txnTieKnot) import Hledger.Query (Query, filterQuery, matchesAmount, matchesPostingExtra, parseQuery, queryIsAmt, queryIsSym, simplifyQuery) import Hledger.Data.Posting (commentJoin, commentAddTag, postingAddTags, postingApplyCommodityStyles) import Hledger.Utils (dbg6, wrap) -- $setup -- >>> :set -XOverloadedStrings -- >>> import Hledger.Data.Posting -- >>> import Hledger.Data.Transaction -- >>> import Hledger.Data.Journal -- | Apply all the given transaction modifiers, in turn, to each transaction. -- Or if any of them fails to be parsed, return the first error. A reference -- date is provided to help interpret relative dates in transaction modifier -- queries. 
modifyTransactions :: (AccountName -> Maybe AccountType) -> (AccountName -> [Tag]) -> M.Map CommoditySymbol AmountStyle -> Day -> Bool -> [TransactionModifier] -> [Transaction] -> Either String [Transaction] modifyTransactions atypes atags styles d verbosetags tmods ts = do fs <- mapM (transactionModifierToFunction atypes atags styles d verbosetags) tmods -- convert modifiers to functions, or return a parse error let modifytxn t = t'' where t' = foldr (flip (.)) id fs t -- apply each function in turn t'' = if t' == t then t' else t'{tcomment=tcomment t' & (if verbosetags then (`commentAddTag` ("modified","")) else id) ,ttags=ttags t' & (("_modified","") :) & (if verbosetags then (("modified","") :) else id) } Right $ map modifytxn ts -- | Converts a 'TransactionModifier' to a 'Transaction'-transforming function -- which applies the modification(s) specified by the TransactionModifier. -- Or, returns the error message there is a problem parsing the TransactionModifier's query. -- A reference date is provided to help interpret relative dates in the query. -- -- The postings of the transformed transaction will reference it in the usual -- way (ie, 'txnTieKnot' is called). -- -- Currently the only kind of modification possible is adding automated -- postings when certain other postings are present. -- -- >>> import qualified Data.Text.IO as T -- >>> t = nulltransaction{tpostings=["ping" `post` usd 1]} -- >>> tmpost acc amt = TMPostingRule (acc `post` amt) False -- >>> test = either putStr (T.putStr.showTransaction) . fmap ($ t) . transactionModifierToFunction (const Nothing) (const []) mempty nulldate True -- >>> test $ TransactionModifier "" ["pong" `tmpost` usd 2] -- 0000-01-01 -- ping $1.00 -- pong $2.00 ; generated-posting: = -- -- >>> test $ TransactionModifier "miss" ["pong" `tmpost` usd 2] -- 0000-01-01 -- ping $1.00 -- -- >>> test $ TransactionModifier "ping" [("pong" `tmpost` nullamt{aquantity=3}){tmprIsMultiplier=True}] -- 0000-01-01 -- ping $1.00 -- pong $3.00 ; generated-posting: = ping -- -- transactionModifierToFunction :: (AccountName -> Maybe AccountType) -> (AccountName -> [Tag]) -> M.Map CommoditySymbol AmountStyle -> Day -> Bool -> TransactionModifier -> Either String (Transaction -> Transaction) transactionModifierToFunction atypes atags styles refdate verbosetags TransactionModifier{tmquerytxt, tmpostingrules} = do q <- simplifyQuery . fst <$> parseQuery refdate tmquerytxt let fs = map (\tmpr -> addAccountTags . tmPostingRuleToFunction verbosetags styles q tmquerytxt tmpr) tmpostingrules addAccountTags p = p `postingAddTags` atags (paccount p) generatePostings p = p : map ($ p) (if matchesPostingExtra atypes q p then fs else []) Right $ \t@(tpostings -> ps) -> txnTieKnot t{tpostings=concatMap generatePostings ps} -- | Converts a 'TransactionModifier''s posting rule to a 'Posting'-generating function, -- which will be used to make a new posting based on the old one (an "automated posting"). -- The new posting's amount can optionally be the old posting's amount multiplied by a constant. -- If the old posting had a total-priced amount, the new posting's multiplied amount will be unit-priced. -- The new posting will have a hidden _generated-posting: tag added, -- and with a true first argument, also a visible generated-posting: tag. -- The provided TransactionModifier's query text is saved as the tags' value. 
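-- For example, an auto posting rule written in journal syntax as
--
-- @
-- = expenses:gifts
--     (budget:gifts)  *-1
-- @
--
-- has a multiplier of -1: applied to a matched posting of $20 it generates a
-- posting of $-20 to (budget:gifts), tagged with
-- @generated-posting: = expenses:gifts@ when verbose tags are enabled
-- (an illustrative sketch; see also the doctests on transactionModifierToFunction above).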
tmPostingRuleToFunction :: Bool -> M.Map CommoditySymbol AmountStyle -> Query -> T.Text -> TMPostingRule -> (Posting -> Posting) tmPostingRuleToFunction verbosetags styles query querytxt tmpr = \p -> postingApplyCommodityStyles styles . renderPostingCommentDates $ pr { pdate = pdate pr <|> pdate p , pdate2 = pdate2 pr <|> pdate2 p , pamount = amount' p , pcomment = pcomment pr & (if verbosetags then (`commentAddTag` ("generated-posting",qry)) else id) , ptags = ptags pr & (("_generated-posting",qry) :) & (if verbosetags then (("generated-posting", qry) :) else id) } where pr = tmprPosting tmpr qry = "= " <> querytxt symq = filterQuery (liftA2 (||) queryIsSym queryIsAmt) query amount' = case postingRuleMultiplier tmpr of Nothing -> const $ pamount pr Just n -> \p -> -- Multiply the old posting's amount by the posting rule's multiplier. let pramount = dbg6 "pramount" . head . amountsRaw $ pamount pr matchedamount = dbg6 "matchedamount" . filterMixedAmount (symq `matchesAmount`) $ pamount p -- Handle a matched amount with a total price carefully so as to keep the transaction balanced (#928). -- Approach 1: convert to a unit price and increase the display precision slightly -- Mixed as = dbg6 "multipliedamount" $ n `multiplyMixedAmount` mixedAmountTotalPriceToUnitPrice matchedamount -- Approach 2: multiply the total price (keeping it positive) as well as the quantity as = dbg6 "multipliedamount" $ multiplyMixedAmount n matchedamount in case acommodity pramount of "" -> as -- TODO multipliers with commodity symbols are not yet a documented feature. -- For now: in addition to multiplying the quantity, it also replaces the -- matched amount's commodity, display style, and price with those of the posting rule. c -> mapMixedAmount (\a -> a{acommodity = c, astyle = astyle pramount, aprice = aprice pramount}) as postingRuleMultiplier :: TMPostingRule -> Maybe Quantity postingRuleMultiplier tmpr = case amountsRaw . pamount $ tmprPosting tmpr of [a] | tmprIsMultiplier tmpr -> Just $ aquantity a _ -> Nothing renderPostingCommentDates :: Posting -> Posting renderPostingCommentDates p = p { pcomment = comment' } where dates = T.concat $ catMaybes [showDate <$> pdate p, ("=" <>) . showDate <$> pdate2 p] comment' | T.null dates = pcomment p | otherwise = (wrap "[" "]" dates) `commentJoin` pcomment p hledger-lib-1.30/Hledger/Data/Types.hs0000644000000000000000000007404014434445206015677 0ustar0000000000000000 {-| Most data types are defined here to avoid import cycles. Here is an overview of the hledger data model: > Journal -- a journal is read from one or more data files. It contains.. > [Transaction] -- journal transactions (aka entries), which have date, cleared status, code, description and.. > [Posting] -- multiple account postings, which have account name and amount > [MarketPrice] -- historical market prices for commodities > > Ledger -- a ledger is derived from a journal, by applying a filter specification and doing some further processing. It contains.. > Journal -- a filtered copy of the original journal, containing only the transactions and postings we are interested in > [Account] -- all accounts, in tree order beginning with a "root" account", with their balances and sub/parent accounts For more detailed documentation on each type, see the corresponding modules. 
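For example, this journal entry (standard journal syntax):

> 2023-01-01 grocery store
>     expenses:food      $10
>     assets:cash       $-10

is parsed into a single Transaction containing two Postings, each holding a
MixedAmount with one USD Amount.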
-} -- {-# LANGUAGE DeriveAnyClass #-} -- https://hackage.haskell.org/package/deepseq-1.4.4.0/docs/Control-DeepSeq.html#v:rnf {-# LANGUAGE CPP #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE StandaloneDeriving #-} module Hledger.Data.Types ( module Hledger.Data.Types, #if MIN_VERSION_time(1,11,0) Year #endif ) where import GHC.Generics (Generic) import Data.Decimal (Decimal, DecimalRaw(..)) import Data.Default (Default(..)) import Data.Functor (($>)) import Data.List (intercalate) --XXX https://hackage.haskell.org/package/containers/docs/Data-Map.html --Note: You should use Data.Map.Strict instead of this module if: --You will eventually need all the values stored. --The stored values don't represent large virtual data structures to be lazily computed. import qualified Data.Map as M import Data.Ord (comparing) import Data.Text (Text) import Data.Time.Calendar (Day) import Data.Time.Clock.POSIX (POSIXTime) import Data.Time.LocalTime (LocalTime) import Data.Word (Word8) import Text.Blaze (ToMarkup(..)) import Text.Megaparsec (SourcePos(SourcePos), mkPos) import Hledger.Utils.Regex -- synonyms for various date-related scalars #if MIN_VERSION_time(1,11,0) import Data.Time.Calendar (Year) #else type Year = Integer #endif type Month = Int -- 1-12 type Quarter = Int -- 1-4 type YearWeek = Int -- 1-52 type MonthWeek = Int -- 1-5 type YearDay = Int -- 1-366 type MonthDay = Int -- 1-31 type WeekDay = Int -- 1-7 -- | A possibly incomplete year-month-day date provided by the user, to be -- interpreted as either a date or a date span depending on context. Missing -- parts "on the left" will be filled from the provided reference date, e.g. if -- the year and month are missing, the reference date's year and month are used. -- Missing parts "on the right" are assumed, when interpreting as a date, to be -- 1, (e.g. if the year and month are present but the day is missing, it means -- first day of that month); or when interpreting as a date span, to be a -- wildcard (so it would mean all days of that month). See the `smartdate` -- parser for more examples. -- -- Or, one of the standard periods and an offset relative to the reference date: -- (last|this|next) (day|week|month|quarter|year), where "this" means the period -- containing the reference date. data SmartDate = SmartCompleteDate Day | SmartAssumeStart Year (Maybe Month) -- XXX improve these constructor names | SmartFromReference (Maybe Month) MonthDay -- | SmartMonth Month | SmartRelative Integer SmartInterval deriving (Show) data SmartInterval = Day | Week | Month | Quarter | Year deriving (Show) data WhichDate = PrimaryDate | SecondaryDate deriving (Eq,Show) -- | A date which is either exact or flexible. -- Flexible dates are allowed to be adjusted in certain situations. data EFDay = Exact Day | Flex Day deriving (Eq,Generic,Show) -- EFDay's Ord instance treats them like ordinary dates, ignoring exact/flexible. instance Ord EFDay where compare d1 d2 = compare (fromEFDay d1) (fromEFDay d2) -- instance Ord EFDay where compare = maCompare fromEFDay :: EFDay -> Day fromEFDay (Exact d) = d fromEFDay (Flex d) = d modifyEFDay :: (Day -> Day) -> EFDay -> EFDay modifyEFDay f (Exact d) = Exact $ f d modifyEFDay f (Flex d) = Flex $ f d -- | A possibly open-ended span of time, from an optional inclusive start date -- to an optional exclusive end date. Each date can be either exact or flexible. 
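-- For example, the calendar year 2023 is represented by a span whose start is
-- 2023-01-01 (inclusive) and whose end is 2024-01-01 (exclusive); whether the
-- endpoints are Exact or Flex depends on how the span was constructed.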
-- An "exact date span" is a Datepan with exact start and end dates. data DateSpan = DateSpan (Maybe EFDay) (Maybe EFDay) deriving (Eq,Ord,Generic) instance Default DateSpan where def = DateSpan Nothing Nothing -- Typical report periods (spans of time), both finite and open-ended. -- A higher-level abstraction than DateSpan. data Period = DayPeriod Day | WeekPeriod Day | MonthPeriod Year Month | QuarterPeriod Year Quarter | YearPeriod Year | PeriodBetween Day Day | PeriodFrom Day | PeriodTo Day | PeriodAll deriving (Eq,Ord,Show,Generic) instance Default Period where def = PeriodAll ---- Typical report period/subperiod durations, from a day to a year. --data Duration = -- DayLong -- WeekLong -- MonthLong -- QuarterLong -- YearLong -- deriving (Eq,Ord,Show,Generic) -- Ways in which a period can be divided into subperiods. data Interval = NoInterval | Days Int | Weeks Int | Months Int | Quarters Int | Years Int | DayOfMonth Int | WeekdayOfMonth Int Int | DaysOfWeek [Int] | DayOfYear Int Int -- Month, Day -- WeekOfYear Int -- MonthOfYear Int -- QuarterOfYear Int deriving (Eq,Show,Ord,Generic) instance Default Interval where def = NoInterval type Payee = Text type AccountName = Text data AccountType = Asset | Liability | Equity | Revenue | Expense | Cash -- ^ a subtype of Asset - liquid assets to show in cashflow report | Conversion -- ^ a subtype of Equity - account in which to generate conversion postings for transaction prices deriving (Eq,Ord,Generic) instance Show AccountType where show Asset = "A" show Liability = "L" show Equity = "E" show Revenue = "R" show Expense = "X" show Cash = "C" show Conversion = "V" isBalanceSheetAccountType :: AccountType -> Bool isBalanceSheetAccountType t = t `elem` [ Asset, Liability, Equity, Cash, Conversion ] isIncomeStatementAccountType :: AccountType -> Bool isIncomeStatementAccountType t = t `elem` [ Revenue, Expense ] -- | Check whether the first argument is a subtype of the second: either equal -- or one of the defined subtypes. isAccountSubtypeOf :: AccountType -> AccountType -> Bool isAccountSubtypeOf Asset Asset = True isAccountSubtypeOf Liability Liability = True isAccountSubtypeOf Equity Equity = True isAccountSubtypeOf Revenue Revenue = True isAccountSubtypeOf Expense Expense = True isAccountSubtypeOf Cash Cash = True isAccountSubtypeOf Cash Asset = True isAccountSubtypeOf Conversion Conversion = True isAccountSubtypeOf Conversion Equity = True isAccountSubtypeOf _ _ = False -- not worth the trouble, letters defined in accountdirectivep for now --instance Read AccountType -- where -- readsPrec _ ('A' : xs) = [(Asset, xs)] -- readsPrec _ ('L' : xs) = [(Liability, xs)] -- readsPrec _ ('E' : xs) = [(Equity, xs)] -- readsPrec _ ('R' : xs) = [(Revenue, xs)] -- readsPrec _ ('X' : xs) = [(Expense, xs)] -- readsPrec _ _ = [] data AccountAlias = BasicAlias AccountName AccountName | RegexAlias Regexp Replacement deriving (Eq, Read, Show, Ord, Generic) data Side = L | R deriving (Eq,Show,Read,Ord,Generic) -- | One of the decimal marks we support: either period or comma. type DecimalMark = Char isDecimalMark :: Char -> Bool isDecimalMark c = c == '.' || c == ',' -- | The basic numeric type used in amounts. type Quantity = Decimal -- The following is for hledger-web, and requires blaze-markup. -- Doing it here avoids needing a matching flag on the hledger-web package. instance ToMarkup Quantity where toMarkup = toMarkup . 
show deriving instance Generic (DecimalRaw a) -- | An amount's per-unit or total cost/selling price in another -- commodity, as recorded in the journal entry eg with @ or @@. -- "Cost", formerly AKA "transaction price". The amount is always positive. data AmountPrice = UnitPrice !Amount | TotalPrice !Amount deriving (Eq,Ord,Generic,Show) -- | Display style for an amount. data AmountStyle = AmountStyle { ascommodityside :: !Side, -- ^ does the symbol appear on the left or the right ? ascommodityspaced :: !Bool, -- ^ space between symbol and quantity ? asprecision :: !AmountPrecision, -- ^ number of digits displayed after the decimal point asdecimalpoint :: !(Maybe Char), -- ^ character used as decimal point: period or comma. Nothing means "unspecified, use default" asdigitgroups :: !(Maybe DigitGroupStyle) -- ^ style for displaying digit groups, if any } deriving (Eq,Ord,Read,Generic) instance Show AmountStyle where show AmountStyle{..} = concat [ "AmountStylePP \"" , show ascommodityside , show ascommodityspaced , show asprecision , show asdecimalpoint , show asdigitgroups , "..\"" ] -- | The "display precision" for a hledger amount, by which we mean -- the number of decimal digits to display to the right of the decimal mark. -- This can be from 0 to 255 digits (the maximum supported by the Decimal library), -- or NaturalPrecision meaning "show all significant decimal digits". data AmountPrecision = Precision !Word8 | NaturalPrecision deriving (Eq,Ord,Read,Show,Generic) -- | A style for displaying digit groups in the integer part of a -- floating point number. It consists of the character used to -- separate groups (comma or period, whichever is not used as decimal -- point), and the size of each group, starting with the one nearest -- the decimal point. The last group size is assumed to repeat. Eg, -- comma between thousands is DigitGroups ',' [3]. data DigitGroupStyle = DigitGroups !Char ![Word8] deriving (Eq,Ord,Read,Show,Generic) type CommoditySymbol = Text data Commodity = Commodity { csymbol :: CommoditySymbol, cformat :: Maybe AmountStyle } deriving (Show,Eq,Generic) --,Ord) data Amount = Amount { acommodity :: !CommoditySymbol, -- commodity symbol, or special value "AUTO" aquantity :: !Quantity, -- numeric quantity, or zero in case of "AUTO" astyle :: !AmountStyle, aprice :: !(Maybe AmountPrice) -- ^ the (fixed, transaction-specific) price for this amount, if any } deriving (Eq,Ord,Generic,Show) newtype MixedAmount = Mixed (M.Map MixedAmountKey Amount) deriving (Generic,Show) instance Eq MixedAmount where a == b = maCompare a b == EQ instance Ord MixedAmount where compare = maCompare -- | Compare two MixedAmounts, substituting 0 for the quantity of any missing -- commodities in either. maCompare :: MixedAmount -> MixedAmount -> Ordering maCompare (Mixed a) (Mixed b) = go (M.toList a) (M.toList b) where go xss@((kx,x):xs) yss@((ky,y):ys) = case compare kx ky of EQ -> compareQuantities (Just x) (Just y) <> go xs ys LT -> compareQuantities (Just x) Nothing <> go xs yss GT -> compareQuantities Nothing (Just y) <> go xss ys go ((_,x):xs) [] = compareQuantities (Just x) Nothing <> go xs [] go [] ((_,y):ys) = compareQuantities Nothing (Just y) <> go [] ys go [] [] = EQ compareQuantities = comparing (maybe 0 aquantity) <> comparing (maybe 0 totalprice) totalprice x = case aprice x of Just (TotalPrice p) -> aquantity p _ -> 0 -- | Stores the CommoditySymbol of the Amount, along with the CommoditySymbol of -- the price, and its unit price if being used. 
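--
-- Illustrative examples (added for orientation, matching the constructors below):
-- an amount of 2 AAA with no cost attached is keyed by MixedAmountKeyNoPrice "AAA";
-- 2 AAA with a total cost of 10 BBB by MixedAmountKeyTotalPrice "AAA" "BBB";
-- and 2 AAA with a unit cost of 5 BBB by MixedAmountKeyUnitPrice "AAA" "BBB" 5.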
data MixedAmountKey = MixedAmountKeyNoPrice !CommoditySymbol | MixedAmountKeyTotalPrice !CommoditySymbol !CommoditySymbol | MixedAmountKeyUnitPrice !CommoditySymbol !CommoditySymbol !Quantity deriving (Eq,Generic,Show) -- | We don't auto-derive the Ord instance because it would give an undesired ordering. -- We want the keys to be sorted lexicographically: -- (1) By the primary commodity of the amount. -- (2) By the commodity of the price, with no price being first. -- (3) By the unit price, from most negative to most positive, with total prices -- before unit prices. -- For example, we would like the ordering to give -- MixedAmountKeyNoPrice "X" < MixedAmountKeyTotalPrice "X" "Z" < MixedAmountKeyNoPrice "Y" instance Ord MixedAmountKey where compare = comparing commodity <> comparing pCommodity <> comparing pPrice where commodity (MixedAmountKeyNoPrice c) = c commodity (MixedAmountKeyTotalPrice c _) = c commodity (MixedAmountKeyUnitPrice c _ _) = c pCommodity (MixedAmountKeyNoPrice _) = Nothing pCommodity (MixedAmountKeyTotalPrice _ pc) = Just pc pCommodity (MixedAmountKeyUnitPrice _ pc _) = Just pc pPrice (MixedAmountKeyNoPrice _) = Nothing pPrice (MixedAmountKeyTotalPrice _ _) = Nothing pPrice (MixedAmountKeyUnitPrice _ _ q) = Just q data PostingType = RegularPosting | VirtualPosting | BalancedVirtualPosting deriving (Eq,Show,Generic) type TagName = Text type TagValue = Text type Tag = (TagName, TagValue) -- ^ A tag name and (possibly empty) value. type DateTag = (TagName, Day) -- | The status of a transaction or posting, recorded with a status mark -- (nothing, !, or *). What these mean is ultimately user defined. data Status = Unmarked | Pending | Cleared deriving (Eq,Ord,Bounded,Enum,Generic) instance Show Status where -- custom show.. bad idea.. don't do it.. show Unmarked = "" show Pending = "!" show Cleared = "*" -- | A balance assertion is a declaration about an account's expected balance -- at a certain point (posting date and parse order). They provide additional -- error checking and readability to a journal file. -- -- A balance assignments is an instruction to hledger to adjust an -- account's balance to a certain amount at a certain point. -- -- The 'BalanceAssertion' type is used for representing both of these. -- -- hledger supports multiple kinds of balance assertions/assignments, -- which differ in whether they refer to a single commodity or all commodities, -- and the (subaccount-)inclusive or exclusive account balance. -- data BalanceAssertion = BalanceAssertion { baamount :: Amount, -- ^ the expected balance in a particular commodity batotal :: Bool, -- ^ disallow additional non-asserted commodities ? bainclusive :: Bool, -- ^ include subaccounts when calculating the actual balance ? baposition :: SourcePos -- ^ the assertion's file position, for error reporting } deriving (Eq,Generic,Show) data Posting = Posting { pdate :: Maybe Day, -- ^ this posting's date, if different from the transaction's pdate2 :: Maybe Day, -- ^ this posting's secondary date, if different from the transaction's pstatus :: Status, paccount :: AccountName, pamount :: MixedAmount, pcomment :: Text, -- ^ this posting's comment lines, as a single non-indented multi-line string ptype :: PostingType, ptags :: [Tag], -- ^ tag names and values, extracted from the posting comment -- and (after finalisation) the posting account's directive if any pbalanceassertion :: Maybe BalanceAssertion, -- ^ an expected balance in the account after this posting, -- in a single commodity, excluding subaccounts. 
ptransaction :: Maybe Transaction, -- ^ this posting's parent transaction (co-recursive types). -- Tying this knot gets tedious, Maybe makes it easier/optional. poriginal :: Maybe Posting -- ^ When this posting has been transformed in some way -- (eg its amount or price was inferred, or the account name was -- changed by a pivot or budget report), this references the original -- untransformed posting (which will have Nothing in this field). } deriving (Generic) -- The equality test for postings ignores the parent transaction's -- identity, to avoid recurring ad infinitum. -- XXX could check that it's Just or Nothing. instance Eq Posting where (==) (Posting a1 b1 c1 d1 e1 f1 g1 h1 i1 _ _) (Posting a2 b2 c2 d2 e2 f2 g2 h2 i2 _ _) = a1==a2 && b1==b2 && c1==c2 && d1==d2 && e1==e2 && f1==f2 && g1==g2 && h1==h2 && i1==i2 -- | Posting's show instance elides the parent transaction so as not to recurse forever. instance Show Posting where show Posting{..} = "PostingPP {" ++ intercalate ", " [ "pdate=" ++ show (show pdate) ,"pdate2=" ++ show (show pdate2) ,"pstatus=" ++ show (show pstatus) ,"paccount=" ++ show paccount ,"pamount=" ++ show pamount ,"pcomment=" ++ show pcomment ,"ptype=" ++ show ptype ,"ptags=" ++ show ptags ,"pbalanceassertion=" ++ show pbalanceassertion ,"ptransaction=" ++ show (ptransaction $> "txn") ,"poriginal=" ++ show poriginal ] ++ "}" data Transaction = Transaction { tindex :: Integer, -- ^ this transaction's 1-based position in the transaction stream, or 0 when not available tprecedingcomment :: Text, -- ^ any comment lines immediately preceding this transaction tsourcepos :: (SourcePos, SourcePos), -- ^ the file position where the date starts, and where the last posting ends tdate :: Day, tdate2 :: Maybe Day, tstatus :: Status, tcode :: Text, tdescription :: Text, tcomment :: Text, -- ^ this transaction's comment lines, as a single non-indented multi-line string ttags :: [Tag], -- ^ tag names and values, extracted from the comment tpostings :: [Posting] -- ^ this transaction's postings } deriving (Eq,Generic,Show) -- | A transaction modifier rule. This has a query which matches postings -- in the journal, and a list of transformations to apply to those -- postings or their transactions. Currently there is one kind of transformation: -- the TMPostingRule, which adds a posting ("auto posting") to the transaction, -- optionally setting its amount to the matched posting's amount multiplied by a constant. data TransactionModifier = TransactionModifier { tmquerytxt :: Text, tmpostingrules :: [TMPostingRule] } deriving (Eq,Generic,Show) nulltransactionmodifier = TransactionModifier{ tmquerytxt = "" ,tmpostingrules = [] } -- | A transaction modifier transformation, which adds an extra posting -- to the matched posting's transaction. -- Can be like a regular posting, or can have the tmprIsMultiplier flag set, -- indicating that it's a multiplier for the matched posting's amount. data TMPostingRule = TMPostingRule { tmprPosting :: Posting , tmprIsMultiplier :: Bool } deriving (Eq,Generic,Show) -- | A periodic transaction rule, describing a transaction that recurs. data PeriodicTransaction = PeriodicTransaction { ptperiodexpr :: Text, -- ^ the period expression as written ptinterval :: Interval, -- ^ the interval at which this transaction recurs ptspan :: DateSpan, -- ^ the (possibly unbounded) period during which this transaction recurs. Contains a whole number of intervals. 
-- ptsourcepos :: (SourcePos, SourcePos), -- ^ the file position where the period expression starts, and where the last posting ends ptstatus :: Status, -- ^ some of Transaction's fields ptcode :: Text, ptdescription :: Text, ptcomment :: Text, pttags :: [Tag], ptpostings :: [Posting] } deriving (Eq,Generic) -- , Show in PeriodicTransaction.hs nullperiodictransaction = PeriodicTransaction{ ptperiodexpr = "" ,ptinterval = def ,ptspan = def ,ptsourcepos = (SourcePos "" (mkPos 1) (mkPos 1), SourcePos "" (mkPos 1) (mkPos 1)) ,ptstatus = Unmarked ,ptcode = "" ,ptdescription = "" ,ptcomment = "" ,pttags = [] ,ptpostings = [] } data TimeclockCode = SetBalance | SetRequiredHours | In | Out | FinalOut deriving (Eq,Ord,Generic) data TimeclockEntry = TimeclockEntry { tlsourcepos :: SourcePos, tlcode :: TimeclockCode, tldatetime :: LocalTime, tlaccount :: AccountName, tldescription :: Text, tlcomment :: Text, tltags :: [Tag] } deriving (Eq,Ord,Generic) -- | A market price declaration made by the journal format's P directive. -- It declares two things: a historical exchange rate between two commodities, -- and an amount display style for the second commodity. data PriceDirective = PriceDirective { pddate :: Day ,pdcommodity :: CommoditySymbol ,pdamount :: Amount } deriving (Eq,Ord,Generic,Show) -- | A historical market price (exchange rate) from one commodity to another. -- A more concise form of a PriceDirective, without the amount display info. data MarketPrice = MarketPrice { mpdate :: Day -- ^ Date on which this price becomes effective. ,mpfrom :: CommoditySymbol -- ^ The commodity being converted from. ,mpto :: CommoditySymbol -- ^ The commodity being converted to. ,mprate :: Quantity -- ^ One unit of the "from" commodity is worth this quantity of the "to" commodity. } deriving (Eq,Ord,Generic, Show) -- additional valuation-related types in Valuation.hs -- | A Journal, containing transactions and various other things. -- The basic data model for hledger. -- -- This is used during parsing (as the type alias ParsedJournal), and -- then finalised/validated for use as a Journal. Some extra -- parsing-related fields are included for convenience, at least for -- now. In a ParsedJournal these are updated as parsing proceeds, in a -- Journal they represent the final state at end of parsing (used eg -- by the add command). -- data Journal = Journal { -- parsing-related data jparsedefaultyear :: Maybe Year -- ^ the current default year, specified by the most recent Y directive (or current date) ,jparsedefaultcommodity :: Maybe (CommoditySymbol,AmountStyle) -- ^ the current default commodity and its format, specified by the most recent D directive ,jparsedecimalmark :: Maybe DecimalMark -- ^ the character to always parse as decimal point, if set by CsvReader's decimal-mark (or a future journal directive) ,jparseparentaccounts :: [AccountName] -- ^ the current stack of parent account names, specified by apply account directives ,jparsealiases :: [AccountAlias] -- ^ the current account name aliases in effect, specified by alias directives (& options ?) 
-- ,jparsetransactioncount :: Integer -- ^ the current count of transactions parsed so far (only journal format txns, currently) ,jparsetimeclockentries :: [TimeclockEntry] -- ^ timeclock sessions which have not been clocked out ,jincludefilestack :: [FilePath] -- principal data ,jdeclaredpayees :: [(Payee,PayeeDeclarationInfo)] -- ^ Payees declared by payee directives, in parse order (after journal finalisation) ,jdeclaredtags :: [(TagName,TagDeclarationInfo)] -- ^ Tags declared by tag directives, in parse order (after journal finalisation) ,jdeclaredaccounts :: [(AccountName,AccountDeclarationInfo)] -- ^ Accounts declared by account directives, in parse order (after journal finalisation) ,jdeclaredaccounttags :: M.Map AccountName [Tag] -- ^ Accounts which have tags declared in their directives, and those tags. (Does not include parents' tags.) ,jdeclaredaccounttypes :: M.Map AccountType [AccountName] -- ^ Accounts whose type has been explicitly declared in their account directives, grouped by type. ,jaccounttypes :: M.Map AccountName AccountType -- ^ All accounts for which a type has been declared or can be inferred from its parent or its name. ,jglobalcommoditystyles :: M.Map CommoditySymbol AmountStyle -- ^ per-commodity display styles declared globally, eg by command line option or import command ,jcommodities :: M.Map CommoditySymbol Commodity -- ^ commodities and formats declared by commodity directives ,jinferredcommodities :: M.Map CommoditySymbol AmountStyle -- ^ commodities and formats inferred from journal amounts ,jpricedirectives :: [PriceDirective] -- ^ Declarations of market prices by P directives, in parse order (after journal finalisation) ,jinferredmarketprices :: [MarketPrice] -- ^ Market prices implied by transactions, in parse order (after journal finalisation) ,jtxnmodifiers :: [TransactionModifier] ,jperiodictxns :: [PeriodicTransaction] ,jtxns :: [Transaction] ,jfinalcommentlines :: Text -- ^ any final trailing comments in the (main) journal file ,jfiles :: [(FilePath, Text)] -- ^ the file path and raw text of the main and -- any included journal files. The main file is first, -- followed by any included files in the order encountered. -- TODO: FilePath is a sloppy type here, don't assume it's a -- real file; values like "", "-", "(string)" can be seen ,jlastreadtime :: POSIXTime -- ^ when this journal was last read from its file(s) -- NOTE: after adding new fields, eg involving account names, consider updating -- the Anon instance in Hleger.Cli.Anon } deriving (Eq, Generic) -- | A journal in the process of being parsed, not yet finalised. -- The data is partial, and list fields are in reverse order. type ParsedJournal = Journal -- | The id of a data format understood by hledger, eg @journal@ or @csv@. -- The --output-format option selects one of these for output. type StorageFormat = String -- | Extra information found in a payee directive. data PayeeDeclarationInfo = PayeeDeclarationInfo { pdicomment :: Text -- ^ any comment lines following the payee directive ,pditags :: [Tag] -- ^ tags extracted from the comment, if any } deriving (Eq,Show,Generic) nullpayeedeclarationinfo = PayeeDeclarationInfo { pdicomment = "" ,pditags = [] } -- | Extra information found in a tag directive. newtype TagDeclarationInfo = TagDeclarationInfo { tdicomment :: Text -- ^ any comment lines following the tag directive. No tags allowed here. 
} deriving (Eq,Show,Generic) nulltagdeclarationinfo = TagDeclarationInfo { tdicomment = "" } -- | Extra information about an account that can be derived from -- its account directive (and the other account directives). data AccountDeclarationInfo = AccountDeclarationInfo { adicomment :: Text -- ^ any comment lines following an account directive for this account ,aditags :: [Tag] -- ^ tags extracted from the account comment, if any ,adideclarationorder :: Int -- ^ the order in which this account was declared, -- relative to other account declarations, during parsing (1..) ,adisourcepos :: SourcePos -- ^ source file and position } deriving (Eq,Show,Generic) nullaccountdeclarationinfo = AccountDeclarationInfo { adicomment = "" ,aditags = [] ,adideclarationorder = 0 ,adisourcepos = SourcePos "" (mkPos 1) (mkPos 1) } -- | An account, with its balances, parent/subaccount relationships, etc. -- Only the name is required; the other fields are added when needed. data Account = Account { aname :: AccountName -- ^ this account's full name ,adeclarationinfo :: Maybe AccountDeclarationInfo -- ^ optional extra info from account directives -- relationships in the tree ,asubs :: [Account] -- ^ this account's sub-accounts ,aparent :: Maybe Account -- ^ parent account ,aboring :: Bool -- ^ used in the accounts report to label elidable parents -- balance information ,anumpostings :: Int -- ^ the number of postings to this account ,aebalance :: MixedAmount -- ^ this account's balance, excluding subaccounts ,aibalance :: MixedAmount -- ^ this account's balance, including subaccounts } deriving (Generic) -- | Whether an account's balance is normally a positive number (in -- accounting terms, a debit balance) or a negative number (credit balance). -- Assets and expenses are normally positive (debit), while liabilities, equity -- and income are normally negative (credit). -- https://en.wikipedia.org/wiki/Normal_balance data NormalSign = NormallyPositive | NormallyNegative deriving (Show, Eq) -- | A Ledger has the journal it derives from, and the accounts -- derived from that. Accounts are accessible both list-wise and -- tree-wise, since each one knows its parent and subs; the first -- account is the root of the tree and always exists. data Ledger = Ledger { ljournal :: Journal ,laccounts :: [Account] } deriving (Generic) hledger-lib-1.30/Hledger/Data/Valuation.hs0000644000000000000000000005611714434445206016542 0ustar0000000000000000{-| Convert amounts to some related value in various ways. This involves looking up historical market prices (exchange rates) between commodities. -} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE DeriveGeneric #-} module Hledger.Data.Valuation ( ConversionOp(..) ,ValuationType(..) 
,PriceOracle ,journalPriceOracle ,mixedAmountToCost ,mixedAmountApplyValuation ,mixedAmountValueAtDate ,mixedAmountApplyGain ,mixedAmountGainAtDate ,marketPriceReverse ,priceDirectiveToMarketPrice ,amountPriceDirectiveFromCost -- ,priceLookup ,tests_Valuation ) where import Control.Applicative ((<|>)) import Data.Function ((&), on) import Data.List (partition, intercalate, sortBy) import Data.List.Extra (nubSortBy) import qualified Data.Map as M import qualified Data.Set as S import qualified Data.Text as T import Data.Time.Calendar (Day, fromGregorian) import Data.MemoUgly (memo) import GHC.Generics (Generic) import Safe (headMay, lastMay) import Hledger.Utils import Hledger.Data.Types import Hledger.Data.Amount import Hledger.Data.Dates (nulldate) import Text.Printf (printf) ------------------------------------------------------------------------------ -- Types -- | Which operation to perform on conversion transactions. -- (There was also an "infer equity postings" operation, but that is now done -- earlier, in journal finalisation.) data ConversionOp = NoConversionOp | ToCost deriving (Show,Eq) -- | What kind of value conversion should be done on amounts ? -- CLI: --value=then|end|now|DATE[,COMM] data ValuationType = AtThen (Maybe CommoditySymbol) -- ^ convert to default or given valuation commodity, using market prices at each posting's date | AtEnd (Maybe CommoditySymbol) -- ^ convert to default or given valuation commodity, using market prices at period end(s) | AtNow (Maybe CommoditySymbol) -- ^ convert to default or given valuation commodity, using current market prices | AtDate Day (Maybe CommoditySymbol) -- ^ convert to default or given valuation commodity, using market prices on some date deriving (Show,Eq) -- | A price oracle is a magic memoising function that efficiently -- looks up market prices (exchange rates) from one commodity to -- another (or if unspecified, to a default valuation commodity) on a -- given date. type PriceOracle = (Day, CommoditySymbol, Maybe CommoditySymbol) -> Maybe (CommoditySymbol, Quantity) -- | Generate a price oracle (memoising price lookup function) from a -- journal's directive-declared and transaction-inferred market -- prices. For best performance, generate this only once per journal, -- reusing it across reports if there are more than one, as -- compoundBalanceCommand does. -- The boolean argument is whether to infer market prices from -- transactions or not. journalPriceOracle :: Bool -> Journal -> PriceOracle journalPriceOracle infer Journal{jpricedirectives, jinferredmarketprices} = let declaredprices = map priceDirectiveToMarketPrice jpricedirectives inferredprices = if infer then jinferredmarketprices else [] makepricegraph = memo $ makePriceGraph declaredprices inferredprices in memo $ uncurry3 $ priceLookup makepricegraph priceDirectiveToMarketPrice :: PriceDirective -> MarketPrice priceDirectiveToMarketPrice PriceDirective{..} = MarketPrice{ mpdate = pddate , mpfrom = pdcommodity , mpto = acommodity pdamount , mprate = aquantity pdamount } -- | Make one or more `MarketPrice` from an 'Amount' and its price directives. 
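-- (More precisely, as defined below it yields at most one 'PriceDirective',
-- derived from the amount's attached cost, if any. Illustrative example, not a
-- doctest: 2 AAA bought at a unit cost of 5 BBB on 2023-01-01 yields the
-- equivalent of "P 2023-01-01 AAA 5 BBB"; a total cost of 10 BBB yields the
-- same, since the total is divided by the quantity.)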
amountPriceDirectiveFromCost :: Day -> Amount -> Maybe PriceDirective amountPriceDirectiveFromCost d amt@Amount{acommodity=fromcomm, aquantity=fromq} = case aprice amt of Just (UnitPrice pa) -> Just $ pd{pdamount=pa} Just (TotalPrice pa) | fromq /= 0 -> Just $ pd{pdamount=fromq `divideAmountExtraPrecision` pa} _ -> Nothing where pd = PriceDirective{pddate = d, pdcommodity = fromcomm, pdamount = nullamt} divideAmountExtraPrecision n a = (n `divideAmount` a) { astyle = style' } where style' = (astyle a) { asprecision = precision' } precision' = case asprecision (astyle a) of NaturalPrecision -> NaturalPrecision Precision p -> Precision $ (numDigitsInt $ truncate n) + p ------------------------------------------------------------------------------ -- Converting things to value -- | Convert all component amounts to cost/selling price if requested, and style them. mixedAmountToCost :: M.Map CommoditySymbol AmountStyle -> ConversionOp -> MixedAmount -> MixedAmount mixedAmountToCost styles cost = mapMixedAmount (amountToCost styles cost) -- | Apply a specified valuation to this mixed amount, using the -- provided price oracle, commodity styles, and reference dates. -- See amountApplyValuation. mixedAmountApplyValuation :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Day -> Day -> Day -> ValuationType -> MixedAmount -> MixedAmount mixedAmountApplyValuation priceoracle styles periodlast today postingdate v = mapMixedAmount (amountApplyValuation priceoracle styles periodlast today postingdate v) -- | Convert an Amount to its cost if requested, and style it appropriately. amountToCost :: M.Map CommoditySymbol AmountStyle -> ConversionOp -> Amount -> Amount amountToCost styles ToCost = styleAmount styles . amountCost amountToCost _ NoConversionOp = id -- | Apply a specified valuation to this amount, using the provided -- price oracle, and reference dates. Also fix up its display style -- using the provided commodity styles. -- -- When the valuation requires converting to another commodity, a -- valuation (conversion) date is chosen based on the valuation type -- and the provided reference dates. It will be one of: -- -- - the date of the posting itself (--value=then) -- -- - the provided "period end" date - this is typically the last day -- of a subperiod (--value=end with a multi-period report), or of -- the specified report period or the journal (--value=end with a -- single-period report). -- -- - the provided "today" date (--value=now). -- -- - a fixed date specified by the ValuationType itself -- (--value=DATE). -- -- This is all a bit complicated. See the reference doc at -- https://hledger.org/hledger.html#effect-of-valuation-on-reports -- (hledger_options.m4.md "Effect of valuation on reports"), and #1083. -- amountApplyValuation :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Day -> Day -> Day -> ValuationType -> Amount -> Amount amountApplyValuation priceoracle styles periodlast today postingdate v a = case v of AtThen mc -> amountValueAtDate priceoracle styles mc postingdate a AtEnd mc -> amountValueAtDate priceoracle styles mc periodlast a AtNow mc -> amountValueAtDate priceoracle styles mc today a AtDate d mc -> amountValueAtDate priceoracle styles mc d a -- | Find the market value of each component amount in the given -- commodity, or its default valuation commodity, at the given -- valuation date, using the given market price oracle. -- When market prices available on that date are not sufficient to -- calculate the value, amounts are left unchanged. 
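--
-- Illustrative example (not a doctest): if the price oracle knows a rate of
-- 1 AAA = 2 BBB on or before the valuation date, a component amount of 3 AAA is
-- valued as 6 BBB; a component with no applicable price is returned unchanged.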
mixedAmountValueAtDate :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Maybe CommoditySymbol -> Day -> MixedAmount -> MixedAmount mixedAmountValueAtDate priceoracle styles mc d = mapMixedAmount (amountValueAtDate priceoracle styles mc d) -- | Find the market value of this amount in the given valuation -- commodity if any, otherwise the default valuation commodity, at the -- given valuation date. (The default valuation commodity is the -- commodity of the latest applicable market price before the -- valuation date.) -- -- The returned amount will have its commodity's canonical style applied, -- but with the precision adjusted to show all significant decimal digits -- up to a maximum of 8. (experimental) -- -- If the market prices available on that date are not sufficient to -- calculate this value, the amount is left unchanged. amountValueAtDate :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Maybe CommoditySymbol -> Day -> Amount -> Amount amountValueAtDate priceoracle styles mto d a = case priceoracle (d, acommodity a, mto) of Nothing -> a Just (comm, rate) -> -- setNaturalPrecisionUpTo 8 $ -- XXX force higher precision in case amount appears to be zero ? -- Make default display style use precision 2 instead of 0 ? -- Leave as is for now; mentioned in manual. styleAmount styles nullamt{acommodity=comm, aquantity=rate * aquantity a} -- | Calculate the gain of each component amount, that is the difference -- between the valued amount and the value of the cost basis (see -- mixedAmountApplyValuation). -- -- If the commodity we are valuing in is not the same as the commodity of the -- cost, this will value the cost at the same date as the primary amount. This -- may not be what you want; for example you may want the cost valued at the -- posting date. If so, let us know and we can change this behaviour. mixedAmountApplyGain :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Day -> Day -> Day -> ValuationType -> MixedAmount -> MixedAmount mixedAmountApplyGain priceoracle styles periodlast today postingdate v ma = mixedAmountApplyValuation priceoracle styles periodlast today postingdate v $ ma `maMinus` mixedAmountCost ma -- | Calculate the gain of each component amount, that is the -- difference between the valued amount and the value of the cost basis. -- -- If the commodity we are valuing in is not the same as the commodity of the -- cost, this will value the cost at the same date as the primary amount. This -- may not be what you want; for example you may want the cost valued at the -- posting date. If so, let us know and we can change this behaviour. mixedAmountGainAtDate :: PriceOracle -> M.Map CommoditySymbol AmountStyle -> Maybe CommoditySymbol -> Day -> MixedAmount -> MixedAmount mixedAmountGainAtDate priceoracle styles mto d ma = mixedAmountValueAtDate priceoracle styles mto d $ ma `maMinus` mixedAmountCost ma ------------------------------------------------------------------------------ -- Market price lookup -- | Given a memoising price graph generator, a valuation date, a -- source commodity and an optional valuation commodity, find the -- value on that date of one unit of the source commodity in the -- valuation commodity, or in a default valuation commodity. Returns -- the valuation commodity that was specified or chosen, and the -- quantity of it that one unit of the source commodity is worth. Or -- if no applicable market price can be found or calculated, or if the -- source commodity and the valuation commodity are the same, returns -- Nothing. 
-- -- See makePriceGraph for how prices are determined. -- Note that both market prices and default valuation commodities can -- vary with valuation date, since that determines which market prices -- are visible. -- priceLookup :: (Day -> PriceGraph) -> Day -> CommoditySymbol -> Maybe CommoditySymbol -> Maybe (CommoditySymbol, Quantity) priceLookup makepricegraph d from mto = -- trace ("priceLookup ("++show d++", "++show from++", "++show mto++")") $ let PriceGraph{pgEdges=forwardprices ,pgEdgesRev=allprices ,pgDefaultValuationCommodities=defaultdests } = traceOrLogAt 1 ("valuation date: "++show d) $ makepricegraph d mto' = mto <|> mdefaultto where mdefaultto = dbg1 ("default valuation commodity for "++T.unpack from) $ M.lookup from defaultdests in case mto' of Nothing -> Nothing Just to | to==from -> Nothing Just to -> -- We have a commodity to convert to. Find the most direct price available, -- according to the rules described in makePriceGraph. let msg = printf "seeking %s to %s price" (showCommoditySymbol from) (showCommoditySymbol to) in case (traceOrLogAt 2 (msg++" using forward prices") $ pricesShortestPath from to forwardprices) <|> (traceOrLogAt 2 (msg++" using forward and reverse prices") $ pricesShortestPath from to allprices) of Nothing -> Nothing Just [] -> Nothing Just ps -> Just (mpto $ last ps, product $ map mprate ps) tests_priceLookup = let p y m d from q to = MarketPrice{mpdate=fromGregorian y m d, mpfrom=from, mpto=to, mprate=q} ps1 = [ p 2000 01 01 "A" 10 "B" ,p 2000 01 01 "B" 10 "C" ,p 2000 01 01 "C" 10 "D" ,p 2000 01 01 "E" 2 "D" ,p 2001 01 01 "A" 11 "B" ] makepricegraph = makePriceGraph ps1 [] in testCase "priceLookup" $ do priceLookup makepricegraph (fromGregorian 1999 01 01) "A" Nothing @?= Nothing priceLookup makepricegraph (fromGregorian 2000 01 01) "A" Nothing @?= Just ("B",10) priceLookup makepricegraph (fromGregorian 2000 01 01) "B" (Just "A") @?= Just ("A",0.1) priceLookup makepricegraph (fromGregorian 2000 01 01) "A" (Just "E") @?= Just ("E",500) ------------------------------------------------------------------------------ -- Market price graph -- built directly with MarketPrices for now, probably space-inefficient type Edge = MarketPrice type Path = [Edge] data PriceGraph = PriceGraph { pgDate :: Day -- ^ The date on which these prices are in effect. ,pgEdges :: [Edge] -- ^ "Forward" exchange rates between commodity pairs, either -- declared by P directives or inferred from transaction prices, -- forming the edges of a directed graph. ,pgEdgesRev :: [Edge] -- ^ The same edges, plus any additional edges that can be -- inferred by reversing them and inverting the rates. -- -- In both of these there will be at most one edge between each -- directed pair of commodities, eg there can be one USD->EUR and one EUR->USD. ,pgDefaultValuationCommodities :: M.Map CommoditySymbol CommoditySymbol -- ^ The default valuation commodity for each source commodity. -- These are used when a valuation commodity is not specified -- (-V). They are the destination commodity of each source commodity's -- latest (declared or inferred, but not reverse) market price -- (on the date of this graph). } deriving (Show,Generic) -- | Find the shortest path and corresponding conversion rate, if any, -- from one commodity to another using the provided market prices which -- form the edges of a directed graph. There should be at most one edge -- between each directed pair of commodities, eg there can be one -- USD->EUR price and one EUR->USD price. 
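--
-- Illustrative example (not a doctest): given edges A->B and B->C, the shortest
-- path from A to C is [A->B, B->C]; the caller (priceLookup) multiplies the
-- rates along the returned path to get the overall conversion rate.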
pricesShortestPath :: CommoditySymbol -> CommoditySymbol -> [Edge] -> Maybe Path pricesShortestPath start end edges = -- at --debug=2 +, print the pretty path and also the detailed prices let label = printf "shortest path from %s to %s: " (showCommoditySymbol start) (showCommoditySymbol end) in fmap (dbg2With (("price chain:\n"++).pshow)) $ dbg2With ((label++).(maybe "none found" (pshowpath ""))) $ find [([],edges)] where -- Find the first and shortest complete path using a breadth-first search. find :: [(Path,[Edge])] -> Maybe Path find paths = case concatMap extend paths of [] -> Nothing _ | pathlength > maxpathlength -> traceOrLog ("gave up searching for a price chain at length "++show maxpathlength++", please report a bug") Nothing where pathlength = 2 + maybe 0 (length . fst) (headMay paths) maxpathlength = 1000 paths' -> case completepaths of p:_ -> Just p -- the left-most complete path at this length [] -> find paths' where completepaths = [p | (p,_) <- paths', (mpto <$> lastMay p) == Just end] -- Use all applicable edges from those provided to extend this path by one step, -- returning zero or more new (path, remaining edges) pairs. extend :: (Path,[Edge]) -> [(Path,[Edge])] extend (path,unusededges) = let pathnodes = start : map mpto path pathend = maybe start mpto $ lastMay path (nextedges,remainingedges) = partition ((==pathend).mpfrom) unusededges in [ (path', remainingedges') | e <- nextedges , let path' = dbgpath "trying" $ path ++ [e] -- PERF prepend ? , let pathnodes' = mpto e : pathnodes , let remainingedges' = [r | r <- remainingedges, mpto r `notElem` pathnodes' ] ] -- debug helpers dbgpath label = dbg2With (pshowpath label) -- dbgedges label = dbg2With (pshowedges label) pshowpath label = \case [] -> prefix label "" p@(e:_) -> prefix label $ pshownode (mpfrom e) ++ ">" ++ intercalate ">" (map (pshownode . mpto) p) -- pshowedges label = prefix label . intercalate ", " . map (pshowedge "") -- pshowedge label MarketPrice{..} = pshowedge' label mpfrom mpto -- pshowedge' label from to = prefix label $ pshownode from ++ ">" ++ pshownode to pshownode = T.unpack . showCommoditySymbol prefix l = if null l then (""++) else ((l++": ")++) -- | A snapshot of the known exchange rates between commodity pairs at a given date. -- This is a home-made version, more tailored to our needs. -- | Build the graph of commodity conversion prices for a given day. -- Converts a list of declared market prices in parse order, and a -- list of transaction-inferred market prices in parse order, to: -- -- 1. a graph of all known exchange rates declared or inferred from -- one commodity to another in effect on that day -- -- 2. a second graph which includes any additional exchange rates -- that can be inferred by reversing known rates -- -- 3. a map of each commodity's default valuation commodity, if any. -- -- These allow price lookup and valuation to be performed as -- described in hledger.m4.md -> Valuation: -- -- "hledger looks for a market price (exchange rate) from commodity A -- to commodity B in one or more of these ways, in this order of -- preference: -- -- 1. A *declared market price* or *inferred market price*: -- A's latest market price in B on or before the valuation date -- as declared by a P directive, or (with the `--infer-market-prices` flag) -- inferred from transaction prices. -- -- 2. A *reverse market price*: -- the inverse of a declared or inferred market price from B to A. -- -- 3. 
A *a forward chain of market prices*: -- a synthetic price formed by combining the shortest chain of -- "forward" (only 1 above) market prices, leading from A to B. -- -- 4. A *any chain of market prices*: -- a chain of any market prices, including both forward and -- reverse prices (1 and 2 above), leading from A to B." -- -- and: "For each commodity A, hledger picks a default valuation -- commodity as follows, in this order of preference: -- -- 1. The price commodity from the latest declared market price for A -- on or before valuation date. -- -- 2. The price commodity from the latest declared market price for A -- on any date. (Allows conversion to proceed if there are inferred -- prices before the valuation date.) -- -- 3. If there are no P directives at all (any commodity or date), and -- the `--infer-market-prices` flag is used, then the price commodity from -- the latest transaction price for A on or before valuation date." -- makePriceGraph :: [MarketPrice] -> [MarketPrice] -> Day -> PriceGraph makePriceGraph alldeclaredprices allinferredprices d = dbg9 ("makePriceGraph "++show d) $ PriceGraph{ pgDate = d ,pgEdges=forwardprices ,pgEdgesRev=allprices ,pgDefaultValuationCommodities=defaultdests } where -- prices in effect on date d, either declared or inferred visibledeclaredprices = dbg9 "visibledeclaredprices" $ filter ((<=d).mpdate) alldeclaredprices visibleinferredprices = dbg9 "visibleinferredprices" $ filter ((<=d).mpdate) allinferredprices forwardprices = effectiveMarketPrices visibledeclaredprices visibleinferredprices -- infer any additional reverse prices not already declared or inferred reverseprices = dbg9 "additional reverse prices" $ [p | p@MarketPrice{..} <- map marketPriceReverse forwardprices , not $ (mpfrom,mpto) `S.member` forwardpairs ] where forwardpairs = S.fromList [(mpfrom,mpto) | MarketPrice{..} <- forwardprices] allprices = forwardprices ++ reverseprices -- determine a default valuation commodity for each source commodity -- somewhat but not quite like effectiveMarketPrices defaultdests = M.fromList [(mpfrom,mpto) | MarketPrice{..} <- pricesfordefaultcomms] where pricesfordefaultcomms = dbg9 "prices for choosing default valuation commodities, by date then parse order" $ ps & zip [1..] -- label items with their parse order & sortBy (compare `on` (\(parseorder,MarketPrice{..})->(mpdate,parseorder))) -- sort by increasing date then increasing parse order & map snd -- discard labels where ps | not $ null visibledeclaredprices = visibledeclaredprices | not $ null alldeclaredprices = alldeclaredprices | otherwise = visibleinferredprices -- will be null without --infer-market-prices -- | Given a list of P-declared market prices in parse order and a -- list of transaction-inferred market prices in parse order, select -- just the latest prices that are in effect for each commodity pair. -- That is, for each commodity pair, the latest price by date then -- parse order, with declared prices having precedence over inferred -- prices on the same day. effectiveMarketPrices :: [MarketPrice] -> [MarketPrice] -> [MarketPrice] effectiveMarketPrices declaredprices inferredprices = let -- label each item with its same-day precedence, then parse order declaredprices' = [(1, i, p) | (i,p) <- zip [1..] declaredprices] inferredprices' = [(0, i, p) | (i,p) <- zip [1..] 
inferredprices] in dbg9 "effective forward prices" $ -- combine declaredprices' ++ inferredprices' -- sort by decreasing date then decreasing precedence then decreasing parse order & sortBy (flip compare `on` (\(precedence,parseorder,mp)->(mpdate mp,precedence,parseorder))) -- discard the sorting labels & map third3 -- keep only the first (ie the newest, highest precedence, latest parsed) price for each pair & nubSortBy (compare `on` (\(MarketPrice{..})->(mpfrom,mpto))) marketPriceReverse :: MarketPrice -> MarketPrice marketPriceReverse mp@MarketPrice{..} = mp{mpfrom=mpto, mpto=mpfrom, mprate=if mprate==0 then 0 else 1/mprate} -- PARTIAL: / nullmarketprice :: MarketPrice nullmarketprice = MarketPrice { mpdate=nulldate ,mpfrom="" ,mpto="" ,mprate=0 } ------------------------------------------------------------------------------ tests_Valuation = testGroup "Valuation" [ tests_priceLookup ,testCase "marketPriceReverse" $ do marketPriceReverse nullmarketprice{mprate=2} @?= nullmarketprice{mprate=0.5} marketPriceReverse nullmarketprice @?= nullmarketprice -- the reverse of a 0 price is a 0 price ] hledger-lib-1.30/Hledger/Query.hs0000644000000000000000000015231014434445206015024 0ustar0000000000000000{-| A general query system for matching things (accounts, postings, transactions..) by various criteria, and a SimpleTextParser for query expressions. -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TupleSections #-} module Hledger.Query ( -- * Query and QueryOpt Query(..), QueryOpt(..), OrdPlus(..), payeeTag, noteTag, generatedTransactionTag, -- * parsing parseQuery, parseQueryList, parseQueryTerm, parseAccountType, -- * modifying simplifyQuery, filterQuery, filterQueryOrNotQuery, matchesQuery, -- * predicates queryIsNull, queryIsDate, queryIsDate2, queryIsDateOrDate2, queryIsStatus, queryIsCode, queryIsDesc, queryIsTag, queryIsAcct, queryIsType, queryIsDepth, queryIsReal, queryIsAmt, queryIsSym, queryIsStartDateOnly, queryIsTransactionRelated, -- * accessors queryStartDate, queryEndDate, queryDateSpan, queryDateSpan', queryDepth, inAccount, inAccountQuery, -- * matching things with queries matchesTransaction, matchesTransactionExtra, matchesDescription, matchesPayeeWIP, matchesPosting, matchesPostingExtra, matchesAccount, matchesAccountExtra, matchesMixedAmount, matchesAmount, matchesCommodity, matchesTags, matchesPriceDirective, words'', queryprefixes, -- * tests tests_Query ) where import Control.Applicative ((<|>), many, optional) import Data.Default (Default(..)) import Data.Either (partitionEithers) import Data.List (partition, intercalate) import Data.Maybe (fromMaybe, isJust, mapMaybe) import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar (Day, fromGregorian ) import Safe (readDef, readMay, maximumByMay, maximumMay, minimumMay) import Text.Megaparsec (between, noneOf, sepBy, try, (), notFollowedBy) import Text.Megaparsec.Char (char, string, string') import Hledger.Utils hiding (words') import Hledger.Data.Types import Hledger.Data.AccountName import Hledger.Data.Amount (amountsRaw, mixedAmount, nullamt, usd) import Hledger.Data.Dates import Hledger.Data.Posting import Hledger.Data.Transaction -- | A query is a composition of search criteria, which can be used to -- match postings, transactions, accounts and more. 
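--
-- For example (illustrative, simplified): the command-line query
-- "expenses:dining desc:lunch" corresponds roughly to
-- And [Acct (RegexpCI "expenses:dining"), Desc (RegexpCI "lunch")],
-- and "not:cash" to Not (Acct (RegexpCI "cash")).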
data Query = -- compound queries Not Query -- ^ negate this match | And [Query] -- ^ match if all of these match | Or [Query] -- ^ match if any of these match -- no-op queries | Any -- ^ always match | None -- ^ never match -- data queries (in "standard" order, roughly as they appear in a transaction) | Date DateSpan -- ^ match primary dates in this date span | Date2 DateSpan -- ^ match secondary dates in this date span | StatusQ Status -- ^ match this txn/posting status | Code Regexp -- ^ match txn codes infix-matched by this regexp | Desc Regexp -- ^ match txn descriptions infix-matched by this regexp | Tag Regexp (Maybe Regexp) -- ^ match if a tag's name, and optionally its value, is infix-matched by the respective regexps | Acct Regexp -- ^ match account names infix-matched by this regexp | Type [AccountType] -- ^ match accounts whose type is one of these (or with no types, any account) | Depth Int -- ^ match if account depth is less than or equal to this value (or, sometimes used as a display option) | Real Bool -- ^ match postings with this "realness" value | Amt OrdPlus Quantity -- ^ match if the amount's numeric quantity is less than/greater than/equal to/unsignedly equal to some value | Sym Regexp -- ^ match if the commodity symbol is fully-matched by this regexp deriving (Eq,Show) instance Default Query where def = Any -- | Construct a payee tag payeeTag :: Maybe Text -> Either RegexError Query payeeTag = fmap (Tag (toRegexCI' "payee")) . maybe (pure Nothing) (fmap Just . toRegexCI) -- | Construct a note tag noteTag :: Maybe Text -> Either RegexError Query noteTag = fmap (Tag (toRegexCI' "note")) . maybe (pure Nothing) (fmap Just . toRegexCI) -- | Construct a generated-transaction tag generatedTransactionTag :: Query generatedTransactionTag = Tag (toRegexCI' "generated-transaction") Nothing -- | A more expressive Ord, used for amt: queries. The Abs* variants -- compare with the absolute value of a number, ignoring sign. data OrdPlus = Lt | LtEq | Gt | GtEq | Eq | AbsLt | AbsLtEq | AbsGt | AbsGtEq | AbsEq deriving (Show,Eq) -- | A query option changes a query's/report's behaviour and output in some way. data QueryOpt = QueryOptInAcctOnly AccountName -- ^ show an account register focussed on this account | QueryOptInAcct AccountName -- ^ as above but include sub-accounts in the account register -- | QueryOptCostBasis -- ^ show amounts converted to cost where possible -- | QueryOptDate2 -- ^ show secondary dates instead of primary dates deriving (Show, Eq) -- parsing -- -- | A query restricting the account(s) to be shown in the sidebar, if any. -- -- Just looks at the first query option. -- showAccountMatcher :: [QueryOpt] -> Maybe Query -- showAccountMatcher (QueryOptInAcctSubsOnly a:_) = Just $ Acct True $ accountNameToAccountRegex a -- showAccountMatcher _ = Nothing -- | A version of parseQueryList which acts on a single Text of -- space-separated terms. -- -- The usual shell quoting rules are assumed. When a pattern contains -- whitespace, it (or the whole term including prefix) should be enclosed -- in single or double quotes. -- -- A query term is either: -- -- 1. a search pattern, which matches on one or more fields, eg: -- -- acct:REGEXP - match the account name with a regular expression -- desc:REGEXP - match the transaction description -- date:PERIODEXP - match the date with a period expression -- -- The prefix indicates the field to match, or if there is no prefix -- account name is assumed. -- -- 2. a query option, which modifies the reporting behaviour in some -- way. 
There is currently one of these, which may appear only once: -- -- inacct:FULLACCTNAME -- -- Period expressions may contain relative dates, so a reference date is -- required to fully parse these. -- -- >>> parseQuery nulldate "expenses:dining out" -- Right (Or [Acct (RegexpCI "expenses:dining"),Acct (RegexpCI "out")],[]) -- -- >>> parseQuery nulldate "\"expenses:dining out\"" -- Right (Acct (RegexpCI "expenses:dining out"),[]) parseQuery :: Day -> T.Text -> Either String (Query,[QueryOpt]) parseQuery d t = parseQueryList d $ words'' queryprefixes t -- | Convert a list of space-separated queries to a single query -- -- Multiple terms are combined as follows: -- 1. multiple account patterns are OR'd together -- 2. multiple description patterns are OR'd together -- 3. multiple status patterns are OR'd together -- 4. then all terms are AND'd together parseQueryList :: Day -> [T.Text] -> Either String (Query, [QueryOpt]) parseQueryList d termstrs = do eterms <- mapM (parseQueryTerm d) termstrs let (pats, optss) = unzip eterms q = combineQueryList pats Right (q, concat optss) combineQueryList :: [Query] -> Query combineQueryList pats = q where (descpats, pats') = partition queryIsDesc pats (acctpats, pats'') = partition queryIsAcct pats' (statuspats, otherpats) = partition queryIsStatus pats'' q = simplifyQuery $ And $ [Or acctpats, Or descpats, Or statuspats] ++ otherpats -- XXX -- | Quote-and-prefix-aware version of words - don't split on spaces which -- are inside quotes, including quotes which may have one of the specified -- prefixes in front, and maybe an additional not: prefix in front of that. words'' :: [T.Text] -> T.Text -> [T.Text] words'' prefixes = fromparse . parsewith maybePrefixedQuotedPhrases -- XXX where maybePrefixedQuotedPhrases :: SimpleTextParser [T.Text] maybePrefixedQuotedPhrases = choice' [prefixedQuotedPattern, singleQuotedPattern, doubleQuotedPattern, patterns] `sepBy` (notFollowedBy (skipNonNewlineSpaces >> char ')') >> skipNonNewlineSpaces1) prefixedQuotedPattern :: SimpleTextParser T.Text prefixedQuotedPattern = do not' <- fromMaybe "" `fmap` (optional $ string "not:") let allowednexts | T.null not' = prefixes | otherwise = prefixes ++ [""] next <- choice' $ map string allowednexts let prefix :: T.Text prefix = not' <> next p <- singleQuotedPattern <|> doubleQuotedPattern return $ prefix <> stripquotes p singleQuotedPattern :: SimpleTextParser T.Text singleQuotedPattern = stripquotes . T.pack <$> between (char '\'') (char '\'') (many $ noneOf ("'" :: [Char])) doubleQuotedPattern :: SimpleTextParser T.Text doubleQuotedPattern = stripquotes . T.pack <$> between (char '"') (char '"') (many $ noneOf ("\"" :: [Char])) patterns :: SimpleTextParser T.Text patterns = T.pack <$> many (noneOf (" \n\r" :: [Char])) -- XXX -- keep synced with patterns below, excluding "not" queryprefixes :: [T.Text] queryprefixes = map (<>":") [ "inacctonly" ,"inacct" ,"amt" ,"code" ,"desc" ,"payee" ,"note" ,"acct" ,"date" ,"date2" ,"status" ,"cur" ,"real" ,"empty" ,"depth" ,"tag" ,"type" ,"expr" ] defaultprefix :: T.Text defaultprefix = "acct" -- -- | Parse the query string as a boolean tree of match patterns. -- parseQueryTerm :: String -> Query -- parseQueryTerm s = either (const (Any)) id $ runParser query () "" $ lexmatcher s -- lexmatcher :: String -> [String] -- lexmatcher s = words' s -- query :: GenParser String () Query -- query = undefined -- | Parse a single query term as either a query or a query option, -- or return an error message if parsing fails. 
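--
-- Illustrative examples (not doctests): "desc:groceries" parses to
-- Desc (RegexpCI "groceries"); "not:acct:food" to Not (Acct (RegexpCI "food"));
-- and a bare pattern like "food" is treated as "acct:food".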
parseQueryTerm :: Day -> T.Text -> Either String (Query, [QueryOpt]) parseQueryTerm _ (T.stripPrefix "inacctonly:" -> Just s) = Right (Any, [QueryOptInAcctOnly s]) parseQueryTerm _ (T.stripPrefix "inacct:" -> Just s) = Right (Any, [QueryOptInAcct s]) parseQueryTerm d (T.stripPrefix "not:" -> Just s) = case parseQueryTerm d s of Right (q, qopts) -> Right (Not q, qopts) Left err -> Left err parseQueryTerm _ (T.stripPrefix "code:" -> Just s) = (,[]) . Code <$> toRegexCI s parseQueryTerm _ (T.stripPrefix "desc:" -> Just s) = (,[]) . Desc <$> toRegexCI s parseQueryTerm _ (T.stripPrefix "payee:" -> Just s) = (,[]) <$> payeeTag (Just s) parseQueryTerm _ (T.stripPrefix "note:" -> Just s) = (,[]) <$> noteTag (Just s) parseQueryTerm _ (T.stripPrefix "acct:" -> Just s) = (,[]) . Acct <$> toRegexCI s parseQueryTerm d (T.stripPrefix "date2:" -> Just s) = case parsePeriodExpr d s of Left e -> Left $ "\"date2:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,spn) -> Right (Date2 spn, []) parseQueryTerm d (T.stripPrefix "date:" -> Just s) = case parsePeriodExpr d s of Left e -> Left $ "\"date:"++T.unpack s++"\" gave a "++showDateParseError e Right (_,spn) -> Right (Date spn, []) parseQueryTerm _ (T.stripPrefix "status:" -> Just s) = case parseStatus s of Left e -> Left $ "\"status:"++T.unpack s++"\" gave a parse error: " ++ e Right st -> Right (StatusQ st, []) parseQueryTerm _ (T.stripPrefix "real:" -> Just s) = Right (Real $ parseBool s || T.null s, []) parseQueryTerm _ (T.stripPrefix "amt:" -> Just s) = Right (Amt ord q, []) where (ord, q) = either error id $ parseAmountQueryTerm s -- PARTIAL: parseQueryTerm _ (T.stripPrefix "depth:" -> Just s) | n >= 0 = Right (Depth n, []) | otherwise = Left "depth: should have a positive number" where n = readDef 0 (T.unpack s) parseQueryTerm _ (T.stripPrefix "cur:" -> Just s) = (,[]) . Sym <$> toRegexCI ("^" <> s <> "$") -- support cur: as an alias parseQueryTerm _ (T.stripPrefix "tag:" -> Just s) = (,[]) <$> parseTag s parseQueryTerm _ (T.stripPrefix "type:" -> Just s) = (,[]) <$> parseTypeCodes s parseQueryTerm d (T.stripPrefix "expr:" -> Just s) = parseBooleanQuery d s parseQueryTerm _ "" = Right (Any, []) parseQueryTerm d s = parseQueryTerm d $ defaultprefix<>":"<>s -- | Parses a boolean query expression. -- -- Boolean queries combine smaller queries into larger ones. The boolean operators -- made available through this function are "NOT e", "e AND e", "e OR e", and "e e". -- Query options defined in multiple sub-queries are simply combined by concatenating -- all options into one list. -- -- Boolean operators in queries take precedence over one another. For instance, the -- prefix-operator "NOT e" is always parsed before "e AND e", "e AND e" before "e OR e", -- and "e OR e" before "e e". -- -- The space-separation operator is left as it was the default before the introduction of -- boolean operators. It takes the behaviour defined in the interpretQueryList function, -- whereas the NOT, OR, and AND operators simply wrap a list of queries with the associated -- -- -- The result of this function is either an error encountered during parsing of the -- expression or the combined query and query options. 
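--
-- Parentheses can be used to override this precedence (illustrative example):
-- "expenses:dining AND (desc:a OR desc:b)" groups the OR before applying the AND.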
-- -- >>> parseBooleanQuery nulldate "expenses:dining AND out" -- Right (And [Acct (RegexpCI "expenses:dining"),Acct (RegexpCI "out")],[]) -- -- >>> parseBooleanQuery nulldate "expenses:dining AND desc:a OR desc:b" -- Right (Or [And [Acct (RegexpCI "expenses:dining"),Desc (RegexpCI "a")],Desc (RegexpCI "b")],[]) parseBooleanQuery :: Day -> T.Text -> Either String (Query,[QueryOpt]) parseBooleanQuery d t = either (Left . ("failed to parse query:" <>) . customErrorBundlePretty) Right $ parsewith spacedQueriesP t where regexP :: SimpleTextParser T.Text regexP = choice' [ stripquotes . T.pack <$> between (char '\'') (char '\'') (many $ noneOf ("'" :: [Char])), stripquotes . T.pack <$> between (char '"') (char '"') (many $ noneOf ("\"" :: [Char])), T.pack <$> (notFollowedBy keywordSpaceP >> (many $ noneOf (") \n\r" :: [Char]))) ] queryPrefixP :: SimpleTextParser T.Text queryPrefixP = (string "not:" <> (fromMaybe "" <$> optional queryPrefixP)) <|> choice' (string <$> queryprefixes) "query prefix" queryTermP :: SimpleTextParser (Query, [QueryOpt]) queryTermP = do prefix <- optional queryPrefixP queryRegex <- regexP case parseQueryTerm d (fromMaybe "" prefix <> queryRegex) of Right q -> return q Left err -> error' err keywordSpaceP :: SimpleTextParser T.Text keywordSpaceP = choice' (string' <$> ["not ", "and ", "or "]) parQueryP,notQueryP :: SimpleTextParser (Query, [QueryOpt]) parQueryP = between (char '(' >> skipNonNewlineSpaces) (try $ skipNonNewlineSpaces >> char ')') spacedQueriesP <|> queryTermP notQueryP = (maybe id (\_ (q, qopts) -> (Not q, qopts)) <$> optional (try $ string' "not" >> notFollowedBy (char ':') >> skipNonNewlineSpaces1)) <*> parQueryP andQueriesP,orQueriesP,spacedQueriesP :: SimpleTextParser (Query, [QueryOpt]) andQueriesP = nArityOp And <$> notQueryP `sepBy` (try $ skipNonNewlineSpaces >> string' "and" >> skipNonNewlineSpaces1) orQueriesP = nArityOp Or <$> andQueriesP `sepBy` (try $ skipNonNewlineSpaces >> string' "or" >> skipNonNewlineSpaces1) spacedQueriesP = nArityOp combineQueryList <$> orQueriesP `sepBy` skipNonNewlineSpaces1 nArityOp :: ([Query] -> Query) -> [(Query, [QueryOpt])] -> (Query, [QueryOpt]) nArityOp f res = let (qs, qoptss) = unzip res qoptss' = concat qoptss in case qs of [] -> (Any, qoptss') (q:[]) -> (simplifyQuery q, qoptss') _ -> (simplifyQuery $ f qs, qoptss') -- | Parse the argument of an amt query term ([OP][SIGN]NUM), to an -- OrdPlus and a Quantity, or if parsing fails, an error message. OP -- can be <=, <, >=, >, or = . NUM can be a simple integer or decimal. -- If a decimal, the decimal mark must be period, and it must have -- digits preceding it. Digit group marks are not allowed. 
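--
-- For example (illustrative; these mirror the parseAmountQueryTerm test
-- cases further below):
--
-- ghci> parseAmountQueryTerm ">10000.10"
-- Right (AbsGt,10000.1)   -- unsigned, non-zero: absolute magnitude comparison
-- ghci> parseAmountQueryTerm "<=+0.23"
-- Right (LtEq,0.23)       -- explicitly signed: ordinary signed comparison
-- ghci> parseAmountQueryTerm "<0"
-- Right (Lt,0)            -- unsigned zero: special-cased as a signed comparison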
parseAmountQueryTerm :: T.Text -> Either String (OrdPlus, Quantity) parseAmountQueryTerm amtarg = case amtarg of -- number has a + sign, do a signed comparison (parse "<=+" -> Just q) -> Right (LtEq ,q) (parse "<+" -> Just q) -> Right (Lt ,q) (parse ">=+" -> Just q) -> Right (GtEq ,q) (parse ">+" -> Just q) -> Right (Gt ,q) (parse "=+" -> Just q) -> Right (Eq ,q) (parse "+" -> Just q) -> Right (Eq ,q) -- number has a - sign, do a signed comparison (parse "<-" -> Just q) -> Right (Lt ,-q) (parse "<=-" -> Just q) -> Right (LtEq ,-q) (parse ">-" -> Just q) -> Right (Gt ,-q) (parse ">=-" -> Just q) -> Right (GtEq ,-q) (parse "=-" -> Just q) -> Right (Eq ,-q) (parse "-" -> Just q) -> Right (Eq ,-q) -- number is unsigned and zero, do a signed comparison (more useful) (parse "<=" -> Just 0) -> Right (LtEq ,0) (parse "<" -> Just 0) -> Right (Lt ,0) (parse ">=" -> Just 0) -> Right (GtEq ,0) (parse ">" -> Just 0) -> Right (Gt ,0) -- number is unsigned and non-zero, do an absolute magnitude comparison (parse "<=" -> Just q) -> Right (AbsLtEq ,q) (parse "<" -> Just q) -> Right (AbsLt ,q) (parse ">=" -> Just q) -> Right (AbsGtEq ,q) (parse ">" -> Just q) -> Right (AbsGt ,q) (parse "=" -> Just q) -> Right (AbsEq ,q) (parse "" -> Just q) -> Right (AbsEq ,q) _ -> Left . T.unpack $ "could not parse as a comparison operator followed by an optionally-signed number: " <> amtarg where -- Strip outer whitespace from the text, require and remove the -- specified prefix, remove all whitespace from the remainder, and -- read it as a simple integer or decimal if possible. parse :: T.Text -> T.Text -> Maybe Quantity parse p s = (T.stripPrefix p . T.strip) s >>= readMay . T.unpack . T.filter (/=' ') parseTag :: T.Text -> Either RegexError Query parseTag s = do tag <- toRegexCI $ if T.null v then s else n body <- if T.null v then pure Nothing else Just <$> toRegexCI (T.tail v) return $ Tag tag body where (n,v) = T.break (=='=') s -- | Parse one or more account type code letters to a query matching any of those types. parseTypeCodes :: T.Text -> Either String Query parseTypeCodes s = case partitionEithers $ map (parseAccountType False . T.singleton) $ T.unpack s of ((e:_),_) -> Left $ "could not parse " <> show e <> " as an account type code.\n" <> help ([],[]) -> Left help ([],ts) -> Right $ Type ts where help = "type:'s argument should be one or more of " ++ accountTypeChoices False ++ " (case insensitive)." accountTypeChoices :: Bool -> String accountTypeChoices allowlongform = intercalate ", " -- keep synced with parseAccountType $ ["A","L","E","R","X","C","V"] ++ if allowlongform then ["Asset","Liability","Equity","Revenue","Expense","Cash","Conversion"] else [] -- | Case-insensitively parse one single-letter code, or one long-form word if permitted, to an account type. -- On failure, returns the unparseable text. parseAccountType :: Bool -> Text -> Either String AccountType parseAccountType allowlongform s = case T.toLower s of -- keep synced with accountTypeChoices "a" -> Right Asset "l" -> Right Liability "e" -> Right Equity "r" -> Right Revenue "x" -> Right Expense "c" -> Right Cash "v" -> Right Conversion "asset" | allowlongform -> Right Asset "liability" | allowlongform -> Right Liability "equity" | allowlongform -> Right Equity "revenue" | allowlongform -> Right Revenue "expense" | allowlongform -> Right Expense "cash" | allowlongform -> Right Cash "conversion" | allowlongform -> Right Conversion _ -> Left $ T.unpack s -- | Parse the value part of a "status:" query, or return an error. 
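--
-- For example (illustrative):
--
-- ghci> parseStatus "*"
-- Right Cleared
-- ghci> parseStatus "!"
-- Right Pending
-- ghci> parseStatus ""
-- Right Unmarked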
parseStatus :: T.Text -> Either String Status parseStatus s | s `elem` ["*","1"] = Right Cleared | s `elem` ["","0"] = Right Unmarked | s == "!" = Right Pending | otherwise = Left $ "could not parse "++show s++" as a status (should be *, ! or empty)" -- | Parse the boolean value part of a "status:" query. "1" means true, -- anything else will be parsed as false without error. parseBool :: T.Text -> Bool parseBool s = s `elem` truestrings truestrings :: [T.Text] truestrings = ["1"] -- * modifying simplifyQuery :: Query -> Query simplifyQuery q0 = let q1 = simplify q0 in if q1 == q0 then q0 else simplifyQuery q1 where simplify (And []) = Any simplify (And [q]) = simplify q simplify (And qs) | same qs = simplify $ head qs | None `elem` qs = None | all queryIsDate qs = Date $ spansIntersect $ mapMaybe queryTermDateSpan qs | otherwise = And $ map simplify dateqs ++ map simplify otherqs where (dateqs, otherqs) = partition queryIsDate $ filter (/=Any) qs simplify (Or []) = Any simplify (Or [q]) = simplifyQuery q simplify (Or qs) | same qs = simplify $ head qs | Any `elem` qs = Any -- all queryIsDate qs = Date $ spansUnion $ mapMaybe queryTermDateSpan qs ? | otherwise = Or $ map simplify $ filter (/=None) qs simplify (Date (DateSpan Nothing Nothing)) = Any simplify (Date2 (DateSpan Nothing Nothing)) = Any simplify q = q same [] = True same (a:as) = all (a==) as -- | Remove query terms (or whole sub-expressions) from this query -- which do not match the given predicate. XXX Semantics not completely clear. -- Also calls simplifyQuery on the result. filterQuery :: (Query -> Bool) -> Query -> Query filterQuery p = simplifyQuery . filterQuery' p -- | Like filterQuery, but returns the filtered query as is, without simplifying. filterQuery' :: (Query -> Bool) -> Query -> Query filterQuery' p (And qs) = And $ map (filterQuery p) qs filterQuery' p (Or qs) = Or $ map (filterQuery p) qs filterQuery' p q = if p q then q else Any -- | Remove query terms (or whole sub-expressions) from this query -- which match neither the given predicate nor that predicate negated -- (eg, if predicate is queryIsAcct, this will keep both "acct:" and "not:acct:" terms). -- Also calls simplifyQuery on the result. -- (Since 1.24.1, might be merged into filterQuery in future.) -- XXX Semantics not completely clear. filterQueryOrNotQuery :: (Query -> Bool) -> Query -> Query filterQueryOrNotQuery p0 = simplifyQuery . filterQueryOrNotQuery' p0 where filterQueryOrNotQuery' :: (Query -> Bool) -> Query -> Query filterQueryOrNotQuery' p (And qs) = And $ map (filterQueryOrNotQuery p) qs filterQueryOrNotQuery' p (Or qs) = Or $ map (filterQueryOrNotQuery p) qs filterQueryOrNotQuery' p (Not q) | p q = Not $ filterQueryOrNotQuery p q filterQueryOrNotQuery' p q = if p q then q else Any -- * predicates -- | Does this simple query predicate match any part of this possibly compound query ? matchesQuery :: (Query -> Bool) -> Query -> Bool matchesQuery p (And qs) = any (matchesQuery p) qs matchesQuery p (Or qs) = any (matchesQuery p) qs matchesQuery p (Not q) = p q matchesQuery p q = p q -- | Does this query match everything ? queryIsNull :: Query -> Bool queryIsNull Any = True queryIsNull (And []) = True queryIsNull (Not (Or [])) = True queryIsNull _ = False -- | Is this a simple query of this type (date:) ? -- Does not match a compound query involving and/or/not. -- Likewise for the following functions. 
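--
-- For example, these predicates are typically passed to filterQuery above
-- to keep or drop one kind of term (illustrative):
--
-- ghci> filterQuery queryIsDepth (And [StatusQ Cleared, Depth 1])
-- Depth 1
-- ghci> filterQuery (not . queryIsDepth) (And [StatusQ Cleared, Depth 1])
-- StatusQ Cleared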
queryIsDate :: Query -> Bool queryIsDate (Date _) = True queryIsDate _ = False queryIsDate2 :: Query -> Bool queryIsDate2 (Date2 _) = True queryIsDate2 _ = False queryIsDateOrDate2 :: Query -> Bool queryIsDateOrDate2 (Date _) = True queryIsDateOrDate2 (Date2 _) = True queryIsDateOrDate2 _ = False queryIsStatus :: Query -> Bool queryIsStatus (StatusQ _) = True queryIsStatus _ = False queryIsCode :: Query -> Bool queryIsCode (Code _) = True queryIsCode _ = False queryIsDesc :: Query -> Bool queryIsDesc (Desc _) = True queryIsDesc _ = False queryIsTag :: Query -> Bool queryIsTag (Tag _ _) = True queryIsTag _ = False queryIsAcct :: Query -> Bool queryIsAcct (Acct _) = True queryIsAcct _ = False queryIsType :: Query -> Bool queryIsType (Type _) = True queryIsType _ = False queryIsDepth :: Query -> Bool queryIsDepth (Depth _) = True queryIsDepth _ = False queryIsReal :: Query -> Bool queryIsReal (Real _) = True queryIsReal _ = False queryIsAmt :: Query -> Bool queryIsAmt (Amt _ _) = True queryIsAmt _ = False queryIsSym :: Query -> Bool queryIsSym (Sym _) = True queryIsSym _ = False -- | Does this query specify a start date and nothing else (that would -- filter postings prior to the date) ? -- When the flag is true, look for a starting secondary date instead. queryIsStartDateOnly :: Bool -> Query -> Bool queryIsStartDateOnly _ Any = False queryIsStartDateOnly _ None = False queryIsStartDateOnly secondary (Or ms) = all (queryIsStartDateOnly secondary) ms queryIsStartDateOnly secondary (And ms) = all (queryIsStartDateOnly secondary) ms queryIsStartDateOnly False (Date (DateSpan (Just _) _)) = True queryIsStartDateOnly True (Date2 (DateSpan (Just _) _)) = True queryIsStartDateOnly _ _ = False -- | Does this query involve a property of transactions (or their postings), -- making it inapplicable to account declarations ? queryIsTransactionRelated :: Query -> Bool queryIsTransactionRelated = matchesQuery ( queryIsDate ||| queryIsDate2 ||| queryIsStatus ||| queryIsCode ||| queryIsDesc ||| queryIsReal ||| queryIsAmt ||| queryIsSym ) (|||) :: (a->Bool) -> (a->Bool) -> (a->Bool) p ||| q = \v -> p v || q v -- * accessors -- | What start date (or secondary date) does this query specify, if any ? -- For OR expressions, use the earliest of the dates. NOT is ignored. queryStartDate :: Bool -> Query -> Maybe Day queryStartDate secondary (Or ms) = earliestMaybeDate $ map (queryStartDate secondary) ms queryStartDate secondary (And ms) = latestMaybeDate $ map (queryStartDate secondary) ms queryStartDate False (Date (DateSpan (Just d) _)) = Just $ fromEFDay d queryStartDate True (Date2 (DateSpan (Just d) _)) = Just $ fromEFDay d queryStartDate _ _ = Nothing -- | What end date (or secondary date) does this query specify, if any ? -- For OR expressions, use the latest of the dates. NOT is ignored. queryEndDate :: Bool -> Query -> Maybe Day queryEndDate secondary (Or ms) = latestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate secondary (And ms) = earliestMaybeDate' $ map (queryEndDate secondary) ms queryEndDate False (Date (DateSpan _ (Just d))) = Just $ fromEFDay d queryEndDate True (Date2 (DateSpan _ (Just d))) = Just $ fromEFDay d queryEndDate _ _ = Nothing queryTermDateSpan (Date spn) = Just spn queryTermDateSpan _ = Nothing -- | What date span (or with a true argument, what secondary date span) does this query specify ? -- OR clauses specifying multiple spans return their union (the span enclosing all of them). -- AND clauses specifying multiple spans return their intersection. 
-- NOT clauses are ignored. queryDateSpan :: Bool -> Query -> DateSpan queryDateSpan secondary (Or qs) = spansUnion $ map (queryDateSpan secondary) qs queryDateSpan secondary (And qs) = spansIntersect $ map (queryDateSpan secondary) qs queryDateSpan _ (Date spn) = spn queryDateSpan True (Date2 spn) = spn queryDateSpan _ _ = nulldatespan -- | What date span does this query specify, treating primary and secondary dates as equivalent ? -- OR clauses specifying multiple spans return their union (the span enclosing all of them). -- AND clauses specifying multiple spans return their intersection. -- NOT clauses are ignored. queryDateSpan' :: Query -> DateSpan queryDateSpan' (Or qs) = spansUnion $ map queryDateSpan' qs queryDateSpan' (And qs) = spansIntersect $ map queryDateSpan' qs queryDateSpan' (Date spn) = spn queryDateSpan' (Date2 spn) = spn queryDateSpan' _ = nulldatespan -- | What is the earliest of these dates, where Nothing is earliest ? earliestMaybeDate :: [Maybe Day] -> Maybe Day earliestMaybeDate = fromMaybe Nothing . minimumMay -- | What is the latest of these dates, where Nothing is earliest ? latestMaybeDate :: [Maybe Day] -> Maybe Day latestMaybeDate = fromMaybe Nothing . maximumMay -- | What is the earliest of these dates, where Nothing is the latest ? earliestMaybeDate' :: [Maybe Day] -> Maybe Day earliestMaybeDate' = fromMaybe Nothing . minimumMay . filter isJust -- | What is the latest of these dates, where Nothing is the latest ? latestMaybeDate' :: [Maybe Day] -> Maybe Day latestMaybeDate' = fromMaybe Nothing . maximumByMay compareNothingMax where compareNothingMax Nothing Nothing = EQ compareNothingMax (Just _) Nothing = LT compareNothingMax Nothing (Just _) = GT compareNothingMax (Just a) (Just b) = compare a b -- | The depth limit this query specifies, if it has one queryDepth :: Query -> Maybe Int queryDepth = minimumMay . queryDepth' where queryDepth' (Depth d) = [d] queryDepth' (Or qs) = concatMap queryDepth' qs queryDepth' (And qs) = concatMap queryDepth' qs queryDepth' _ = [] -- | The account we are currently focussed on, if any, and whether subaccounts are included. -- Just looks at the first query option. inAccount :: [QueryOpt] -> Maybe (AccountName,Bool) inAccount [] = Nothing inAccount (QueryOptInAcctOnly a:_) = Just (a,False) inAccount (QueryOptInAcct a:_) = Just (a,True) -- | A query for the account(s) we are currently focussed on, if any. -- Just looks at the first query option. inAccountQuery :: [QueryOpt] -> Maybe Query inAccountQuery [] = Nothing inAccountQuery (QueryOptInAcctOnly a : _) = Just . Acct $ accountNameToAccountOnlyRegex a inAccountQuery (QueryOptInAcct a : _) = Just . Acct $ accountNameToAccountRegex a -- -- | Convert a query to its inverse. -- negateQuery :: Query -> Query -- negateQuery = Not -- matching things with queries matchesCommodity :: Query -> CommoditySymbol -> Bool matchesCommodity (Sym r) = regexMatchText r matchesCommodity _ = const True -- | Does the match expression match this (simple) amount ? matchesAmount :: Query -> Amount -> Bool matchesAmount (Not q) a = not $ q `matchesAmount` a matchesAmount (Any) _ = True matchesAmount (None) _ = False matchesAmount (Or qs) a = any (`matchesAmount` a) qs matchesAmount (And qs) a = all (`matchesAmount` a) qs matchesAmount (Amt ord n) a = compareAmount ord n a matchesAmount (Sym r) a = matchesCommodity (Sym r) (acommodity a) matchesAmount _ _ = True -- | Is this amount's quantity less than, greater than, equal to, or unsignedly equal to this number ? 
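--
-- For example (illustrative): against a query amount of 10, an amount with
-- quantity -12 satisfies the absolute comparison AbsGt but not the signed
-- comparison Gt:
--
-- ghci> compareAmount AbsGt 10 nullamt{aquantity = -12}
-- True
-- ghci> compareAmount Gt 10 nullamt{aquantity = -12}
-- False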
compareAmount :: OrdPlus -> Quantity -> Amount -> Bool compareAmount ord q Amount{aquantity=aq} = case ord of Lt -> aq < q LtEq -> aq <= q Gt -> aq > q GtEq -> aq >= q Eq -> aq == q AbsLt -> abs aq < abs q AbsLtEq -> abs aq <= abs q AbsGt -> abs aq > abs q AbsGtEq -> abs aq >= abs q AbsEq -> abs aq == abs q matchesMixedAmount :: Query -> MixedAmount -> Bool matchesMixedAmount q ma = case amountsRaw ma of [] -> q `matchesAmount` nullamt as -> any (q `matchesAmount`) as -- | Does the query match this account name ? -- A matching in: clause is also considered a match. matchesAccount :: Query -> AccountName -> Bool matchesAccount (None) _ = False matchesAccount (Not m) a = not $ matchesAccount m a matchesAccount (Or ms) a = any (`matchesAccount` a) ms matchesAccount (And ms) a = all (`matchesAccount` a) ms matchesAccount (Acct r) a = regexMatchText r a matchesAccount (Depth d) a = accountNameLevel a <= d matchesAccount (Tag _ _) _ = False matchesAccount _ _ = True -- | Like matchesAccount, but with optional extra matching features: -- -- - If the account's type is provided, any type: terms in the query -- must match it (and any negated type: terms must not match it). -- -- - If the account's tags are provided, any tag: terms must match -- at least one of them (and any negated tag: terms must match none). -- matchesAccountExtra :: (AccountName -> Maybe AccountType) -> (AccountName -> [Tag]) -> Query -> AccountName -> Bool matchesAccountExtra atypes atags (Not q ) a = not $ matchesAccountExtra atypes atags q a matchesAccountExtra atypes atags (Or qs ) a = any (\q -> matchesAccountExtra atypes atags q a) qs matchesAccountExtra atypes atags (And qs ) a = all (\q -> matchesAccountExtra atypes atags q a) qs matchesAccountExtra atypes _ (Type ts) a = maybe False (\t -> any (t `isAccountSubtypeOf`) ts) $ atypes a matchesAccountExtra _ atags (Tag npat vpat) a = matchesTags npat vpat $ atags a matchesAccountExtra _ _ q a = matchesAccount q a -- | Does the match expression match this posting ? -- When matching account name, and the posting has been transformed -- in some way, we will match either the original or transformed name. matchesPosting :: Query -> Posting -> Bool matchesPosting (Not q) p = not $ q `matchesPosting` p matchesPosting (Any) _ = True matchesPosting (None) _ = False matchesPosting (Or qs) p = any (`matchesPosting` p) qs matchesPosting (And qs) p = all (`matchesPosting` p) qs matchesPosting (Code r) p = maybe False (regexMatchText r . tcode) $ ptransaction p matchesPosting (Desc r) p = maybe False (regexMatchText r . tdescription) $ ptransaction p matchesPosting (Acct r) p = matches p || maybe False matches (poriginal p) where matches = regexMatchText r . paccount matchesPosting (Date spn) p = spn `spanContainsDate` postingDate p matchesPosting (Date2 spn) p = spn `spanContainsDate` postingDate2 p matchesPosting (StatusQ s) p = postingStatus p == s matchesPosting (Real v) p = v == isReal p matchesPosting q@(Depth _) Posting{paccount=a} = q `matchesAccount` a matchesPosting q@(Amt _ _) Posting{pamount=as} = q `matchesMixedAmount` as matchesPosting (Sym r) Posting{pamount=as} = any (matchesCommodity (Sym r) . acommodity) $ amountsRaw as matchesPosting (Tag n v) p = case (reString n, v) of ("payee", Just v') -> maybe False (regexMatchText v' . transactionPayee) $ ptransaction p ("note", Just v') -> maybe False (regexMatchText v' . 
transactionNote) $ ptransaction p (_, mv) -> matchesTags n mv $ postingAllTags p matchesPosting (Type _) _ = False -- | Like matchesPosting, but if the posting's account's type is provided, -- any type: terms in the query must match it (and any negated type: terms -- must not match it). matchesPostingExtra :: (AccountName -> Maybe AccountType) -> Query -> Posting -> Bool matchesPostingExtra atype (Not q ) p = not $ matchesPostingExtra atype q p matchesPostingExtra atype (Or qs) p = any (\q -> matchesPostingExtra atype q p) qs matchesPostingExtra atype (And qs) p = all (\q -> matchesPostingExtra atype q p) qs matchesPostingExtra atype (Type ts) p = -- does posting's account's type, if we can detect it, match any of the given types ? (maybe False (\t -> any (t `isAccountSubtypeOf`) ts) . atype $ paccount p) -- or, try the same test with the original (pre-aliasing/pivoting) posting's account || (fromMaybe False $ do porig <- poriginal p let a = paccount porig t <- atype a Just $ any (t `isAccountSubtypeOf`) ts ) matchesPostingExtra _ q p = matchesPosting q p -- | Does the match expression match this transaction ? matchesTransaction :: Query -> Transaction -> Bool matchesTransaction (Not q) t = not $ q `matchesTransaction` t matchesTransaction (Any) _ = True matchesTransaction (None) _ = False matchesTransaction (Or qs) t = any (`matchesTransaction` t) qs matchesTransaction (And qs) t = all (`matchesTransaction` t) qs matchesTransaction (Code r) t = regexMatchText r $ tcode t matchesTransaction (Desc r) t = regexMatchText r $ tdescription t matchesTransaction q@(Acct _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Date spn) t = spanContainsDate spn $ tdate t matchesTransaction (Date2 spn) t = spanContainsDate spn $ transactionDate2 t matchesTransaction (StatusQ s) t = tstatus t == s matchesTransaction (Real v) t = v == hasRealPostings t matchesTransaction q@(Amt _ _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Depth d) t = any (Depth d `matchesPosting`) $ tpostings t matchesTransaction q@(Sym _) t = any (q `matchesPosting`) $ tpostings t matchesTransaction (Tag n v) t = case (reString n, v) of ("payee", Just v') -> regexMatchText v' $ transactionPayee t ("note", Just v') -> regexMatchText v' $ transactionNote t (_, v') -> matchesTags n v' $ transactionAllTags t matchesTransaction (Type _) _ = False -- | Like matchesTransaction, but if the journal's account types are provided, -- any type: terms in the query must match at least one posting's account type -- (and any negated type: terms must match none). matchesTransactionExtra :: (AccountName -> Maybe AccountType) -> Query -> Transaction -> Bool matchesTransactionExtra atype (Not q) t = not $ matchesTransactionExtra atype q t matchesTransactionExtra atype (Or qs) t = any (\q -> matchesTransactionExtra atype q t) qs matchesTransactionExtra atype (And qs) t = all (\q -> matchesTransactionExtra atype q t) qs matchesTransactionExtra atype q@(Type _) t = any (matchesPostingExtra atype q) $ tpostings t matchesTransactionExtra _ q t = matchesTransaction q t -- | Does the query match this transaction description ? -- Tests desc: terms, any other terms are ignored. 
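--
-- For example (illustrative):
--
-- ghci> Desc (toRegexCI' "grocer") `matchesDescription` "weekly groceries"
-- True
-- ghci> Acct (toRegexCI' "groceries") `matchesDescription` "weekly groceries"
-- False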
matchesDescription :: Query -> Text -> Bool matchesDescription (Not q) d = not $ q `matchesDescription` d matchesDescription (Any) _ = True matchesDescription (None) _ = False matchesDescription (Or qs) d = any (`matchesDescription` d) $ filter queryIsDesc qs matchesDescription (And qs) d = all (`matchesDescription` d) $ filter queryIsDesc qs matchesDescription (Code _) _ = False matchesDescription (Desc r) d = regexMatchText r d matchesDescription _ _ = False -- | Does the query match this transaction payee ? -- Tests desc: (and payee: ?) terms, any other terms are ignored. -- XXX Currently an alias for matchDescription. I'm not sure if more is needed, -- There's some shenanigan with payee: and "payeeTag" to figure out. matchesPayeeWIP :: Query -> Payee -> Bool matchesPayeeWIP = matchesDescription -- | Does the query match the name and optionally the value of any of these tags ? matchesTags :: Regexp -> Maybe Regexp -> [Tag] -> Bool matchesTags namepat valuepat = any (matches namepat valuepat) where matches npat vpat (n,v) = regexMatchText npat n && maybe (const True) regexMatchText vpat v -- | Does the query match this market price ? matchesPriceDirective :: Query -> PriceDirective -> Bool matchesPriceDirective (None) _ = False matchesPriceDirective (Not q) p = not $ matchesPriceDirective q p matchesPriceDirective (Or qs) p = any (`matchesPriceDirective` p) qs matchesPriceDirective (And qs) p = all (`matchesPriceDirective` p) qs matchesPriceDirective q@(Amt _ _) p = matchesAmount q (pdamount p) matchesPriceDirective q@(Sym _) p = matchesCommodity q (pdcommodity p) matchesPriceDirective (Date spn) p = spanContainsDate spn (pddate p) matchesPriceDirective _ _ = True -- tests tests_Query = testGroup "Query" [ testCase "simplifyQuery" $ do (simplifyQuery $ Or [Acct $ toRegex' "a"]) @?= (Acct $ toRegex' "a") (simplifyQuery $ Or [Any,None]) @?= (Any) (simplifyQuery $ And [Any,None]) @?= (None) (simplifyQuery $ And [Any,Any]) @?= (Any) (simplifyQuery $ And [Acct $ toRegex' "b",Any]) @?= (Acct $ toRegex' "b") (simplifyQuery $ And [Any,And [Date (DateSpan Nothing Nothing)]]) @?= (Any) (simplifyQuery $ And [Date (DateSpan Nothing (Just $ Exact $ fromGregorian 2013 01 01)), Date (DateSpan (Just $ Exact $ fromGregorian 2012 01 01) Nothing)]) @?= (Date (DateSpan (Just $ Exact $ fromGregorian 2012 01 01) (Just $ Exact $ fromGregorian 2013 01 01))) (simplifyQuery $ And [Or [],Or [Desc $ toRegex' "b b"]]) @?= (Desc $ toRegex' "b b") ,testCase "parseQuery" $ do (parseQuery nulldate "acct:'expenses:autres d\233penses' desc:b") @?= Right (And [Acct $ toRegexCI' "expenses:autres d\233penses", Desc $ toRegexCI' "b"], []) parseQuery nulldate "inacct:a desc:\"b b\"" @?= Right (Desc $ toRegexCI' "b b", [QueryOptInAcct "a"]) parseQuery nulldate "inacct:a inacct:b" @?= Right (Any, [QueryOptInAcct "a", QueryOptInAcct "b"]) parseQuery nulldate "desc:'x x'" @?= Right (Desc $ toRegexCI' "x x", []) parseQuery nulldate "'a a' 'b" @?= Right (Or [Acct $ toRegexCI' "a a",Acct $ toRegexCI' "'b"], []) parseQuery nulldate "\"" @?= Right (Acct $ toRegexCI' "\"", []) ,testCase "parseBooleanQuery" $ do parseBooleanQuery nulldate "(tag:'atag=a')" @?= Right (Tag (toRegexCI' "atag") (Just $ toRegexCI' "a"), []) parseBooleanQuery nulldate "( tag:\"atag=a\" )" @?= Right (Tag (toRegexCI' "atag") (Just $ toRegexCI' "a"), []) parseBooleanQuery nulldate "(acct:'expenses:food')" @?= Right (Acct $ toRegexCI' "expenses:food", []) parseBooleanQuery nulldate "(((acct:'expenses:food')))" @?= Right (Acct $ toRegexCI' "expenses:food", []) 
parseBooleanQuery nulldate "acct:'expenses:food' AND desc:'b'" @?= Right (And [Acct $ toRegexCI' "expenses:food", Desc $ toRegexCI' "b"], []) parseBooleanQuery nulldate "((desc:'a') AND (desc:'b') OR (desc:'c'))" @?= Right (Or [And [Desc $ toRegexCI' "a", Desc $ toRegexCI' "b"], Desc $ toRegexCI' "c"], []) parseBooleanQuery nulldate "((desc:'a') OR (desc:'b') AND (desc:'c'))" @?= Right (Or [Desc $ toRegexCI' "a", And [Desc $ toRegexCI' "b", Desc $ toRegexCI' "c"]], []) parseBooleanQuery nulldate "((desc:'a') AND desc:'b' AND (desc:'c'))" @?= Right (And [Desc $ toRegexCI' "a", Desc $ toRegexCI' "b", Desc $ toRegexCI' "c"], []) parseBooleanQuery nulldate "(NOT (desc:'a') AND (desc:'b'))" @?= Right (And [Not $ Desc $ toRegexCI' "a", Desc $ toRegexCI' "b"], []) parseBooleanQuery nulldate "((desc:'a') AND (NOT desc:'b'))" @?= Right (And [Desc $ toRegexCI' "a", Not $ Desc $ toRegexCI' "b"], []) parseBooleanQuery nulldate "(desc:'a' AND desc:'b')" @?= Right (And [Desc $ toRegexCI' "a", Desc $ toRegexCI' "b"], []) parseBooleanQuery nulldate "(acct:'a' acct:'b')" @?= Right (Or [Acct $ toRegexCI' "a", Acct $ toRegexCI' "b"], []) parseBooleanQuery nulldate " acct:'a' acct:'b'" @?= Right (Or [Acct $ toRegexCI' "a", Acct $ toRegexCI' "b"], []) parseBooleanQuery nulldate "not:a" @?= Right (Not $ Acct $ toRegexCI' "a", []) parseBooleanQuery nulldate "expenses:food OR (tag:A expenses:drink)" @?= Right (Or [Acct $ toRegexCI' "expenses:food", And [Acct $ toRegexCI' "expenses:drink", Tag (toRegexCI' "A") Nothing]], []) parseBooleanQuery nulldate "not a" @?= Right (Not $ Acct $ toRegexCI' "a", []) parseBooleanQuery nulldate "nota" @?= Right (Acct $ toRegexCI' "nota", []) parseBooleanQuery nulldate "not (acct:a)" @?= Right (Not $ Acct $ toRegexCI' "a", []) ,testCase "words''" $ do (words'' [] "a b") @?= ["a","b"] (words'' [] "'a b'") @?= ["a b"] (words'' [] "not:a b") @?= ["not:a","b"] (words'' [] "not:'a b'") @?= ["not:a b"] (words'' [] "'not:a b'") @?= ["not:a b"] (words'' ["desc:"] "not:desc:'a b'") @?= ["not:desc:a b"] (words'' queryprefixes "\"acct:expenses:autres d\233penses\"") @?= ["acct:expenses:autres d\233penses"] (words'' queryprefixes "\"") @?= ["\""] ,testCase "filterQuery" $ do filterQuery queryIsDepth Any @?= Any filterQuery queryIsDepth (Depth 1) @?= Depth 1 filterQuery (not.queryIsDepth) (And [And [StatusQ Cleared,Depth 1]]) @?= StatusQ Cleared filterQuery queryIsDepth (And [Date nulldatespan, Not (Or [Any, Depth 1])]) @?= Any -- XXX unclear ,testCase "parseQueryTerm" $ do parseQueryTerm nulldate "a" @?= Right (Acct $ toRegexCI' "a", []) parseQueryTerm nulldate "acct:expenses:autres d\233penses" @?= Right (Acct $ toRegexCI' "expenses:autres d\233penses", []) parseQueryTerm nulldate "not:desc:a b" @?= Right (Not $ Desc $ toRegexCI' "a b", []) parseQueryTerm nulldate "status:1" @?= Right (StatusQ Cleared, []) parseQueryTerm nulldate "status:*" @?= Right (StatusQ Cleared, []) parseQueryTerm nulldate "status:!" 
@?= Right (StatusQ Pending, []) parseQueryTerm nulldate "status:0" @?= Right (StatusQ Unmarked, []) parseQueryTerm nulldate "status:" @?= Right (StatusQ Unmarked, []) parseQueryTerm nulldate "payee:x" @?= (,[]) <$> payeeTag (Just "x") parseQueryTerm nulldate "note:x" @?= (,[]) <$> noteTag (Just "x") parseQueryTerm nulldate "real:1" @?= Right (Real True, []) parseQueryTerm nulldate "date:2008" @?= Right (Date $ DateSpan (Just $ Flex $ fromGregorian 2008 01 01) (Just $ Flex $ fromGregorian 2009 01 01), []) parseQueryTerm nulldate "date:from 2012/5/17" @?= Right (Date $ DateSpan (Just $ Exact $ fromGregorian 2012 05 17) Nothing, []) parseQueryTerm nulldate "date:20180101-201804" @?= Right (Date $ DateSpan (Just $ Exact $ fromGregorian 2018 01 01) (Just $ Flex $ fromGregorian 2018 04 01), []) parseQueryTerm nulldate "inacct:a" @?= Right (Any, [QueryOptInAcct "a"]) parseQueryTerm nulldate "tag:a" @?= Right (Tag (toRegexCI' "a") Nothing, []) parseQueryTerm nulldate "tag:a=some value" @?= Right (Tag (toRegexCI' "a") (Just $ toRegexCI' "some value"), []) parseQueryTerm nulldate "amt:<0" @?= Right (Amt Lt 0, []) parseQueryTerm nulldate "amt:>10000.10" @?= Right (Amt AbsGt 10000.1, []) ,testCase "parseAmountQueryTerm" $ do parseAmountQueryTerm "<0" @?= Right (Lt,0) -- special case for convenience, since AbsLt 0 would be always false parseAmountQueryTerm ">0" @?= Right (Gt,0) -- special case for convenience and consistency with above parseAmountQueryTerm " > - 0 " @?= Right (Gt,0) -- accept whitespace around the argument parts parseAmountQueryTerm ">10000.10" @?= Right (AbsGt,10000.1) parseAmountQueryTerm "=0.23" @?= Right (AbsEq,0.23) parseAmountQueryTerm "0.23" @?= Right (AbsEq,0.23) parseAmountQueryTerm "<=+0.23" @?= Right (LtEq,0.23) parseAmountQueryTerm "-0.23" @?= Right (Eq,(-0.23)) assertLeft $ parseAmountQueryTerm "-0,23" assertLeft $ parseAmountQueryTerm "=.23" ,testCase "queryStartDate" $ do let small = Just $ fromGregorian 2000 01 01 big = Just $ fromGregorian 2000 01 02 queryStartDate False (And [Date $ DateSpan (Exact <$> small) Nothing, Date $ DateSpan (Exact <$> big) Nothing]) @?= big queryStartDate False (And [Date $ DateSpan (Exact <$> small) Nothing, Date $ DateSpan Nothing Nothing]) @?= small queryStartDate False (Or [Date $ DateSpan (Exact <$> small) Nothing, Date $ DateSpan (Exact <$> big) Nothing]) @?= small queryStartDate False (Or [Date $ DateSpan (Exact <$> small) Nothing, Date $ DateSpan Nothing Nothing]) @?= Nothing ,testCase "queryEndDate" $ do let small = Just $ fromGregorian 2000 01 01 big = Just $ fromGregorian 2000 01 02 queryEndDate False (And [Date $ DateSpan Nothing (Exact <$> small), Date $ DateSpan Nothing (Exact <$> big)]) @?= small queryEndDate False (And [Date $ DateSpan Nothing (Exact <$> small), Date $ DateSpan Nothing Nothing]) @?= small queryEndDate False (Or [Date $ DateSpan Nothing (Exact <$> small), Date $ DateSpan Nothing (Exact <$> big)]) @?= big queryEndDate False (Or [Date $ DateSpan Nothing (Exact <$> small), Date $ DateSpan Nothing Nothing]) @?= Nothing ,testCase "matchesAccount" $ do assertBool "" $ (Acct $ toRegex' "b:c") `matchesAccount` "a:bb:c:d" assertBool "" $ not $ (Acct $ toRegex' "^a:b") `matchesAccount` "c:a:b" assertBool "" $ Depth 2 `matchesAccount` "a" assertBool "" $ Depth 2 `matchesAccount` "a:b" assertBool "" $ not $ Depth 2 `matchesAccount` "a:b:c" assertBool "" $ Date nulldatespan `matchesAccount` "a" assertBool "" $ Date2 nulldatespan `matchesAccount` "a" assertBool "" $ not $ Tag (toRegex' "a") Nothing `matchesAccount` "a" 
,testCase "matchesAccountExtra" $ do let tagq = Tag (toRegexCI' "type") Nothing assertBool "" $ not $ matchesAccountExtra (const Nothing) (const []) tagq "a" assertBool "" $ matchesAccountExtra (const Nothing) (const [("type","")]) tagq "a" ,testGroup "matchesPosting" [ testCase "positive match on cleared posting status" $ assertBool "" $ (StatusQ Cleared) `matchesPosting` nullposting{pstatus=Cleared} ,testCase "negative match on cleared posting status" $ assertBool "" $ not $ (Not $ StatusQ Cleared) `matchesPosting` nullposting{pstatus=Cleared} ,testCase "positive match on unmarked posting status" $ assertBool "" $ (StatusQ Unmarked) `matchesPosting` nullposting{pstatus=Unmarked} ,testCase "negative match on unmarked posting status" $ assertBool "" $ not $ (Not $ StatusQ Unmarked) `matchesPosting` nullposting{pstatus=Unmarked} ,testCase "positive match on true posting status acquired from transaction" $ assertBool "" $ (StatusQ Cleared) `matchesPosting` nullposting{pstatus=Unmarked,ptransaction=Just nulltransaction{tstatus=Cleared}} ,testCase "real:1 on real posting" $ assertBool "" $ (Real True) `matchesPosting` nullposting{ptype=RegularPosting} ,testCase "real:1 on virtual posting fails" $ assertBool "" $ not $ (Real True) `matchesPosting` nullposting{ptype=VirtualPosting} ,testCase "real:1 on balanced virtual posting fails" $ assertBool "" $ not $ (Real True) `matchesPosting` nullposting{ptype=BalancedVirtualPosting} ,testCase "acct:" $ assertBool "" $ (Acct $ toRegex' "'b") `matchesPosting` nullposting{paccount="'b"} ,testCase "tag:" $ do assertBool "" $ not $ (Tag (toRegex' "a") (Just $ toRegex' "r$")) `matchesPosting` nullposting assertBool "" $ (Tag (toRegex' "foo") Nothing) `matchesPosting` nullposting{ptags=[("foo","")]} assertBool "" $ (Tag (toRegex' "foo") Nothing) `matchesPosting` nullposting{ptags=[("foo","baz")]} assertBool "" $ (Tag (toRegex' "foo") (Just $ toRegex' "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "" $ not $ (Tag (toRegex' "foo") (Just $ toRegex' "a$")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "" $ not $ (Tag (toRegex' " foo ") (Just $ toRegex' "a")) `matchesPosting` nullposting{ptags=[("foo","bar")]} assertBool "" $ not $ (Tag (toRegex' "foo foo") (Just $ toRegex' " ar ba ")) `matchesPosting` nullposting{ptags=[("foo foo","bar bar")]} ,testCase "a tag match on a posting also sees inherited tags" $ assertBool "" $ (Tag (toRegex' "txntag") Nothing) `matchesPosting` nullposting{ptransaction=Just nulltransaction{ttags=[("txntag","")]}} ,testCase "cur:" $ do let toSym = fst . either error' id . parseQueryTerm (fromGregorian 2000 01 01) . 
("cur:"<>) assertBool "" $ not $ toSym "$" `matchesPosting` nullposting{pamount=mixedAmount $ usd 1} -- becomes "^$$", ie testing for null symbol assertBool "" $ (toSym "\\$") `matchesPosting` nullposting{pamount=mixedAmount $ usd 1} -- have to quote $ for regexpr assertBool "" $ (toSym "shekels") `matchesPosting` nullposting{pamount=mixedAmount nullamt{acommodity="shekels"}} assertBool "" $ not $ (toSym "shek") `matchesPosting` nullposting{pamount=mixedAmount nullamt{acommodity="shekels"}} ] ,testCase "matchesTransaction" $ do assertBool "" $ Any `matchesTransaction` nulltransaction assertBool "" $ not $ (Desc $ toRegex' "x x") `matchesTransaction` nulltransaction{tdescription="x"} assertBool "" $ (Desc $ toRegex' "x x") `matchesTransaction` nulltransaction{tdescription="x x"} -- see posting for more tag tests assertBool "" $ (Tag (toRegex' "foo") (Just $ toRegex' "a")) `matchesTransaction` nulltransaction{ttags=[("foo","bar")]} assertBool "" $ (Tag (toRegex' "payee") (Just $ toRegex' "payee")) `matchesTransaction` nulltransaction{tdescription="payee|note"} assertBool "" $ (Tag (toRegex' "note") (Just $ toRegex' "note")) `matchesTransaction` nulltransaction{tdescription="payee|note"} -- a tag match on a transaction also matches posting tags assertBool "" $ (Tag (toRegex' "postingtag") Nothing) `matchesTransaction` nulltransaction{tpostings=[nullposting{ptags=[("postingtag","")]}]} ] hledger-lib-1.30/Hledger/Read.hs0000644000000000000000000002773314434445206014604 0ustar0000000000000000--- * -*- outline-regexp:"--- \\*"; -*- --- ** doc -- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections. {-| This is the entry point to hledger's reading system, which can read Journals from various data formats. Use this module if you want to parse journal data or read journal files. Generally it should not be necessary to import modules below this one. 
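
A minimal usage sketch (illustrative; it assumes the Hledger modules are
in scope and that the named journal file exists):

> import Hledger
>
> main :: IO ()
> main = do
>   j <- readJournalFile' "examples/sample.journal"
>   print $ length $ jtxns j
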
-} --- ** language {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PackageImports #-} {-# LANGUAGE ScopedTypeVariables #-} --- ** exports module Hledger.Read ( -- * Journal files PrefixedFilePath, defaultJournal, defaultJournalPath, requireJournalFileExists, ensureJournalFileExists, -- * Journal parsing readJournal, readJournalFile, readJournalFiles, runExceptT, -- * Easy journal parsing readJournal', readJournalFile', readJournalFiles', orDieTrying, -- * Re-exported JournalReader.tmpostingrulep, findReader, splitReaderPrefix, runJournalParser, module Hledger.Read.Common, module Hledger.Read.InputOptions, -- * Tests tests_Read, ) where --- ** imports import qualified Control.Exception as C import Control.Monad (unless, when) import "mtl" Control.Monad.Except (ExceptT(..), runExceptT) import Control.Monad.IO.Class (MonadIO, liftIO) import Data.Default (def) import Data.Foldable (asum) import Data.List (group, sort, sortBy) import Data.List.NonEmpty (nonEmpty) import Data.Maybe (fromMaybe) import Data.Ord (comparing) import Data.Semigroup (sconcat) import Data.Text (Text) import qualified Data.Text as T import qualified Data.Text.IO as T import Data.Time (Day) import Safe (headDef) import System.Directory (doesFileExist, getHomeDirectory) import System.Environment (getEnv) import System.Exit (exitFailure) import System.FilePath ((<.>), (), splitDirectories, splitFileName, takeFileName) import System.Info (os) import System.IO (hPutStr, stderr) import Hledger.Data.Dates (getCurrentDay, parsedateM, showDate) import Hledger.Data.Types import Hledger.Read.Common import Hledger.Read.InputOptions import Hledger.Read.JournalReader as JournalReader import Hledger.Read.CsvReader (tests_CsvReader) import Hledger.Read.RulesReader (tests_RulesReader) -- import Hledger.Read.TimedotReader (tests_TimedotReader) -- import Hledger.Read.TimeclockReader (tests_TimeclockReader) import Hledger.Utils import Prelude hiding (getContents, writeFile) --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings --- ** journal reading journalEnvVar = "LEDGER_FILE" journalEnvVar2 = "LEDGER" journalDefaultFilename = ".hledger.journal" -- | Read the default journal file specified by the environment, or raise an error. defaultJournal :: IO Journal defaultJournal = defaultJournalPath >>= runExceptT . readJournalFile definputopts >>= either error' return -- PARTIAL: -- | Get the default journal file path specified by the environment. -- Like ledger, we look first for the LEDGER_FILE environment -- variable, and if that does not exist, for the legacy LEDGER -- environment variable. If neither is set, or the value is blank, -- return the hard-coded default, which is @.hledger.journal@ in the -- users's home directory (or in the current directory, if we cannot -- determine a home directory). defaultJournalPath :: IO String defaultJournalPath = do s <- envJournalPath if null s then defpath else return s where envJournalPath = getEnv journalEnvVar `C.catch` (\(_::C.IOException) -> getEnv journalEnvVar2 `C.catch` (\(_::C.IOException) -> return "")) defpath = do home <- getHomeDirectory `C.catch` (\(_::C.IOException) -> return "") return $ home journalDefaultFilename -- | A file path optionally prefixed by a reader name and colon -- (journal:, csv:, timedot:, etc.). type PrefixedFilePath = FilePath -- | @readJournal iopts mfile txt@ -- -- Read a Journal from some text, or return an error message. 
-- -- The reader (data format) is chosen based on, in this order: -- -- - a reader name provided in @iopts@ -- -- - a reader prefix in the @mfile@ path -- -- - a file extension in @mfile@ -- -- If none of these is available, or if the reader name is unrecognised, -- we use the journal reader. (We used to try all readers in this case; -- since hledger 1.17, we prefer predictability.) readJournal :: InputOpts -> Maybe FilePath -> Text -> ExceptT String IO Journal readJournal iopts mpath txt = do let r :: Reader IO = fromMaybe JournalReader.reader $ findReader (mformat_ iopts) mpath dbg6IO "readJournal: trying reader" (rFormat r) rReadFn r iopts (fromMaybe "(string)" mpath) txt -- | Read a Journal from this file, or from stdin if the file path is -, -- or return an error message. The file path can have a READER: prefix. -- -- The reader (data format) to use is determined from (in priority order): -- the @mformat_@ specified in the input options, if any; -- the file path's READER: prefix, if any; -- a recognised file name extension. -- if none of these identify a known reader, the journal reader is used. -- -- The input options can also configure balance assertion checking, automated posting -- generation, a rules file for converting CSV data, etc. readJournalFile :: InputOpts -> PrefixedFilePath -> ExceptT String IO Journal readJournalFile iopts prefixedfile = do let (mfmt, f) = splitReaderPrefix prefixedfile iopts' = iopts{mformat_=asum [mfmt, mformat_ iopts]} liftIO $ requireJournalFileExists f t <- traceOrLogAt 6 ("readJournalFile: "++takeFileName f) $ liftIO $ readFileOrStdinPortably f -- <- T.readFile f -- or without line ending translation, for testing j <- readJournal iopts' (Just f) t if new_ iopts then do ds <- liftIO $ previousLatestDates f let (newj, newds) = journalFilterSinceLatestDates ds j when (new_save_ iopts && not (null newds)) . liftIO $ saveLatestDates newds f return newj else return j -- | Read a Journal from each specified file path and combine them into one. -- Or, return the first error message. -- -- Combining Journals means concatenating them, basically. -- The parse state resets at the start of each file, which means that -- directives & aliases do not affect subsequent sibling or parent files. -- They do affect included child files though. -- Also the final parse state saved in the Journal does span all files. readJournalFiles :: InputOpts -> [PrefixedFilePath] -> ExceptT String IO Journal readJournalFiles iopts = fmap (maybe def sconcat . nonEmpty) . mapM (readJournalFile iopts) -- | An easy version of 'readJournal' which assumes default options, and fails -- in the IO monad. readJournal' :: Text -> IO Journal readJournal' = orDieTrying . readJournal definputopts Nothing -- | An easy version of 'readJournalFile' which assumes default options, and fails -- in the IO monad. readJournalFile' :: PrefixedFilePath -> IO Journal readJournalFile' = orDieTrying . readJournalFile definputopts -- | An easy version of 'readJournalFiles'' which assumes default options, and fails -- in the IO monad. readJournalFiles' :: [PrefixedFilePath] -> IO Journal readJournalFiles' = orDieTrying . readJournalFiles definputopts --- ** utilities -- | Extract ExceptT to the IO monad, failing with an error message if necessary. orDieTrying :: MonadIO m => ExceptT String m a -> m a orDieTrying a = either (liftIO . fail) return =<< runExceptT a -- | If the specified journal file does not exist (and is not "-"), -- give a helpful error and quit. 
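--
-- For example, the easy helpers above can be combined with this check
-- (an illustrative sketch; "2023.journal" is just an example path):
--
-- ghci> requireJournalFileExists "2023.journal"
-- ghci> j <- readJournalFile' "2023.journal"
-- ghci> length (jtxns j)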
requireJournalFileExists :: FilePath -> IO () requireJournalFileExists "-" = return () requireJournalFileExists f = do exists <- doesFileExist f unless exists $ do -- XXX might not be a journal file hPutStr stderr $ "The hledger journal file \"" <> f <> "\" was not found.\n" hPutStr stderr "Please create it first, eg with \"hledger add\" or a text editor.\n" hPutStr stderr "Or, specify an existing journal file with -f or LEDGER_FILE.\n" exitFailure -- | Ensure there is a journal file at the given path, creating an empty one if needed. -- On Windows, also ensure that the path contains no trailing dots -- which could cause data loss (see 'isWindowsUnsafeDotPath'). ensureJournalFileExists :: FilePath -> IO () ensureJournalFileExists f = do when (os/="mingw32" && isWindowsUnsafeDotPath f) $ do hPutStr stderr $ "Part of file path \"" <> show f <> "\"\n ends with a dot, which is unsafe on Windows; please use a different path.\n" exitFailure exists <- doesFileExist f unless exists $ do hPutStr stderr $ "Creating hledger journal file " <> show f <> ".\n" -- note Hledger.Utils.UTF8.* do no line ending conversion on windows, -- we currently require unix line endings on all platforms. newJournalContent >>= T.writeFile f -- | Does any part of this path contain non-. characters and end with a . ? -- Such paths are not safe to use on Windows (cf #1056). isWindowsUnsafeDotPath :: FilePath -> Bool isWindowsUnsafeDotPath = any (\x -> last x == '.' && any (/='.') x) . splitDirectories -- | Give the content for a new auto-created journal file. newJournalContent :: IO Text newJournalContent = do d <- getCurrentDay return $ "; journal created " <> T.pack (show d) <> " by hledger\n" -- A "LatestDates" is zero or more copies of the same date, -- representing the latest transaction date read from a file, -- and how many transactions there were on that date. type LatestDates = [Day] -- | Get all instances of the latest date in an unsorted list of dates. -- Ie, if the latest date appears once, return it in a one-element list, -- if it appears three times (anywhere), return three of it. latestDates :: [Day] -> LatestDates latestDates = headDef [] . take 1 . group . reverse . sort -- | Remember that these transaction dates were the latest seen when -- reading this journal file. saveLatestDates :: LatestDates -> FilePath -> IO () saveLatestDates dates f = T.writeFile (latestDatesFileFor f) $ T.unlines $ map showDate dates -- | What were the latest transaction dates seen the last time this -- journal file was read ? If there were multiple transactions on the -- latest date, that number of dates is returned, otherwise just one. -- Or none if no transactions were read, or if latest dates info is not -- available for this file. previousLatestDates :: FilePath -> IO LatestDates previousLatestDates f = do let latestfile = latestDatesFileFor f parsedate s = maybe (fail $ "could not parse date \"" ++ s ++ "\"") return $ parsedateM s exists <- doesFileExist latestfile if exists then traverse (parsedate . T.unpack . T.strip) . T.lines =<< readFileStrictly latestfile else return [] -- | Where to save latest transaction dates for the given file path. 
-- (.latest.FILE) latestDatesFileFor :: FilePath -> FilePath latestDatesFileFor f = dir ".latest" <.> fname where (dir, fname) = splitFileName f readFileStrictly :: FilePath -> IO Text readFileStrictly f = readFilePortably f >>= \t -> C.evaluate (T.length t) >> return t -- | Given zero or more latest dates (all the same, representing the -- latest previously seen transaction date, and how many transactions -- were seen on that date), remove transactions with earlier dates -- from the journal, and the same number of transactions on the -- latest date, if any, leaving only transactions that we can assume -- are newer. Also returns the new latest dates of the new journal. journalFilterSinceLatestDates :: LatestDates -> Journal -> (Journal, LatestDates) journalFilterSinceLatestDates [] j = (j, latestDates $ map tdate $ jtxns j) journalFilterSinceLatestDates ds@(d:_) j = (j', ds') where samedateorlaterts = filter ((>= d).tdate) $ jtxns j (samedatets, laterts) = span ((== d).tdate) $ sortBy (comparing tdate) samedateorlaterts newsamedatets = drop (length ds) samedatets j' = j{jtxns=newsamedatets++laterts} ds' = latestDates $ map tdate $ samedatets++laterts --- ** tests tests_Read = testGroup "Read" [ tests_Common ,tests_CsvReader ,tests_JournalReader ,tests_RulesReader ] hledger-lib-1.30/Hledger/Read/Common.hs0000644000000000000000000020772114434445206016031 0ustar0000000000000000--- * -*- outline-regexp:"--- \\*"; -*- --- ** doc -- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections. {-| File reading/parsing utilities used by multiple readers, and a good amount of the parsers for journal format, to avoid import cycles when JournalReader imports other readers. Some of these might belong in Hledger.Read.JournalReader or Hledger.Read. 
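
For example, the stand-alone amount parsers exported here can be used
directly (an illustrative sketch; see their definitions below for the
exact argument and result types):

> ghci> parseamount' "$1,000.00"   -- an Amount, with display style inferred from how it was written
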
-} --- ** language {-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE NoMonoLocalBinds #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE Rank2Types #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE TypeFamilies #-} --- ** exports module Hledger.Read.Common ( Reader (..), InputOpts(..), HasInputOpts(..), definputopts, rawOptsToInputOpts, -- * parsing utilities parseAndFinaliseJournal, initialiseAndParseJournal, journalFinalise, journalAddForecast, journalAddAutoPostings, setYear, getYear, setDefaultCommodityAndStyle, getDefaultCommodityAndStyle, getDefaultAmountStyle, getAmountStyle, addDeclaredAccountTags, addDeclaredAccountType, pushParentAccount, popParentAccount, getParentAccount, addAccountAlias, getAccountAliases, clearAccountAliases, journalAddFile, -- * parsers -- ** transaction bits statusp, codep, descriptionp, -- ** dates datep, datetimep, secondarydatep, -- ** account names modifiedaccountnamep, accountnamep, -- ** account aliases accountaliasp, -- ** amounts spaceandamountormissingp, amountp, amountp', commoditysymbolp, costp, balanceassertionp, lotcostp, numberp, fromRawNumber, rawnumberp, parseamount, parseamount', parsemixedamount, parsemixedamount', -- ** comments isLineCommentStart, isSameLineCommentStart, multilinecommentp, emptyorcommentlinep, followingcommentp, transactioncommentp, postingcommentp, -- ** bracketed dates bracketeddatetagsp, -- ** misc noncommenttextp, noncommenttext1p, singlespacedtext1p, singlespacednoncommenttext1p, singlespacedtextsatisfying1p, singlespacep, skipNonNewlineSpaces, skipNonNewlineSpaces1, aliasesFromOpts, -- * tests tests_Common, ) where --- ** imports import Control.Applicative.Permutations (runPermutation, toPermutationWithDefault) import Control.Monad (foldM, liftM2, when, unless, (>=>), (<=<)) import qualified Control.Monad.Fail as Fail (fail) import Control.Monad.Except (ExceptT(..), liftEither, withExceptT) import Control.Monad.IO.Class (MonadIO, liftIO) import Control.Monad.State.Strict (MonadState, evalStateT, modify', get, put) import Control.Monad.Trans.Class (lift) import Data.Bifunctor (bimap, second) import Data.Char (digitToInt, isDigit, isSpace) import Data.Decimal (DecimalRaw (Decimal), Decimal) import Data.Either (rights) import Data.Function ((&)) import Data.Functor ((<&>), ($>), void) import Data.List (find, genericReplicate, union) import Data.List.NonEmpty (NonEmpty(..)) import Data.Maybe (catMaybes, fromMaybe, isJust, listToMaybe) import qualified Data.Map as M import qualified Data.Semigroup as Sem import Data.Text (Text, stripEnd) import qualified Data.Text as T import Data.Time.Calendar (Day, fromGregorianValid, toGregorian) import Data.Time.Clock.POSIX (getPOSIXTime) import Data.Time.LocalTime (LocalTime(..), TimeOfDay(..)) import Data.Word (Word8) import System.FilePath (takeFileName) import Text.Megaparsec import Text.Megaparsec.Char (char, char', digitChar, newline, string) import Text.Megaparsec.Char.Lexer (decimal) import Text.Megaparsec.Custom (FinalParseError, attachSource, finalErrorBundlePretty, parseErrorAt, parseErrorAtRegion) -- import Text.Megaparsec.Debug (dbg) -- from megaparsec 9.3+ import Hledger.Data import Hledger.Query (Query(..), filterQuery, parseQueryTerm, queryEndDate, queryStartDate, queryIsDate, simplifyQuery) import Hledger.Reports.ReportOptions (ReportOpts(..), queryFromFlags, rawOptsToReportOpts) import 
Hledger.Utils import Hledger.Read.InputOptions --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings --- ** types -- main types; a few more below -- | A hledger journal reader is a triple of storage format name, a -- detector of that format, and a parser from that format to Journal. -- The type variable m appears here so that rParserr can hold a -- journal parser, which depends on it. data Reader m = Reader { -- The canonical name of the format handled by this reader rFormat :: StorageFormat -- The file extensions recognised as containing this format ,rExtensions :: [String] -- The entry point for reading this format, accepting input options, file -- path for error messages and file contents, producing an exception-raising IO -- action that produces a journal or error message. ,rReadFn :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal -- The actual megaparsec parser called by the above, in case -- another parser (includedirectivep) wants to use it directly. ,rParser :: MonadIO m => ErroringJournalParser m ParsedJournal } instance Show (Reader m) where show r = rFormat r ++ " reader" -- | Parse an InputOpts from a RawOpts and a provided date. -- This will fail with a usage error if the forecast period expression cannot be parsed. rawOptsToInputOpts :: Day -> RawOpts -> InputOpts rawOptsToInputOpts day rawopts = let noinferbalancingcosts = boolopt "strict" rawopts || stringopt "args" rawopts == "balancednoautoconversion" -- Do we really need to do all this work just to get the requested end date? This is duplicating -- much of reportOptsToSpec. ropts = rawOptsToReportOpts day rawopts argsquery = map fst . rights . map (parseQueryTerm day) $ querystring_ ropts datequery = simplifyQuery . filterQuery queryIsDate . And $ queryFromFlags ropts : argsquery styles = either err id $ commodityStyleFromRawOpts rawopts where err e = error' $ "could not parse commodity-style: '" ++ e ++ "'" -- PARTIAL: in definputopts{ -- files_ = listofstringopt "file" rawopts mformat_ = Nothing ,mrules_file_ = maybestringopt "rules-file" rawopts ,aliases_ = listofstringopt "alias" rawopts ,anon_ = boolopt "anon" rawopts ,new_ = boolopt "new" rawopts ,new_save_ = True ,pivot_ = stringopt "pivot" rawopts ,forecast_ = forecastPeriodFromRawOpts day rawopts ,verbose_tags_ = boolopt "verbose-tags" rawopts ,reportspan_ = DateSpan (Exact <$> queryStartDate False datequery) (Exact <$> queryEndDate False datequery) ,auto_ = boolopt "auto" rawopts ,infer_equity_ = boolopt "infer-equity" rawopts && conversionop_ ropts /= Just ToCost ,infer_costs_ = boolopt "infer-costs" rawopts ,balancingopts_ = defbalancingopts{ ignore_assertions_ = boolopt "ignore-assertions" rawopts , infer_balancing_costs_ = not noinferbalancingcosts , commodity_styles_ = Just styles } ,strict_ = boolopt "strict" rawopts ,_ioDay = day } -- | Get the date span from --forecast's PERIODEXPR argument, if any. -- This will fail with a usage error if the period expression cannot be parsed, -- or if it contains a report interval. forecastPeriodFromRawOpts :: Day -> RawOpts -> Maybe DateSpan forecastPeriodFromRawOpts d rawopts = do arg <- maybestringopt "forecast" rawopts let period = parsePeriodExpr d . 
stripquotes $ T.pack arg return $ if null arg then nulldatespan else either badParse (getSpan arg) period where badParse e = usageError $ "could not parse forecast period : "++customErrorBundlePretty e getSpan arg (interval, requestedspan) = case interval of NoInterval -> requestedspan _ -> usageError $ "--forecast's argument should not contain a report interval (" ++ show interval ++ " in \"" ++ arg ++ "\")" -- | Given the name of the option and the raw options, returns either -- | * a map of successfully parsed commodity styles, if all options where successfully parsed -- | * the first option which failed to parse, if one or more options failed to parse commodityStyleFromRawOpts :: RawOpts -> Either String (M.Map CommoditySymbol AmountStyle) commodityStyleFromRawOpts rawOpts = foldM (\r -> fmap (\(c,a) -> M.insert c a r) . parseCommodity) mempty optList where optList = listofstringopt "commodity-style" rawOpts parseCommodity optStr = case parseamount optStr of Left _ -> Left optStr Right (Amount acommodity _ astyle _) -> Right (acommodity, astyle) -- | Given a parser to ParsedJournal, input options, file path and -- content: run the parser on the content, and finalise the result to -- get a Journal; or throw an error. parseAndFinaliseJournal :: ErroringJournalParser IO ParsedJournal -> InputOpts -> FilePath -> Text -> ExceptT String IO Journal parseAndFinaliseJournal parser iopts f txt = initialiseAndParseJournal parser iopts f txt >>= journalFinalise iopts f txt -- | Given a parser to ParsedJournal, input options, file path and -- content: run the parser on the content. This is all steps of -- 'parseAndFinaliseJournal' without the finalisation step, and is used when -- you need to perform other actions before finalisation, as in parsing -- Timeclock and Timedot files. initialiseAndParseJournal :: ErroringJournalParser IO ParsedJournal -> InputOpts -> FilePath -> Text -> ExceptT String IO Journal initialiseAndParseJournal parser iopts f txt = prettyParseErrors $ runParserT (evalStateT parser initJournal) f txt where y = first3 . toGregorian $ _ioDay iopts initJournal = nulljournal{jparsedefaultyear = Just y, jincludefilestack = [f]} -- Flatten parse errors and final parse errors, and output each as a pretty String. prettyParseErrors :: ExceptT FinalParseError IO (Either (ParseErrorBundle Text HledgerParseErrorData) a) -> ExceptT String IO a prettyParseErrors = withExceptT customErrorBundlePretty . liftEither <=< withExceptT (finalErrorBundlePretty . attachSource f txt) {- HLINT ignore journalFinalise "Redundant <&>" -} -- silence this warning, the code is clearer as is -- NB activates TH, may slow compilation ? 
https://github.com/ndmitchell/hlint/blob/master/README.md#customizing-the-hints -- | Post-process a Journal that has just been parsed or generated, in this order: -- -- - add misc info (file path, read time) -- -- - reverse transactions into their original parse order -- -- - apply canonical commodity styles -- -- - add tags from account directives to postings' tags -- -- - add forecast transactions if enabled -- -- - add tags from account directives to postings' tags (again to affect forecast transactions) -- -- - add auto postings if enabled -- -- - add tags from account directives to postings' tags (again to affect auto postings) -- -- - evaluate balance assignments and balance each transaction -- -- - check balance assertions if enabled -- -- - infer equity postings in conversion transactions if enabled -- -- - infer market prices from costs if enabled -- -- - check all accounts have been declared if in strict mode -- -- - check all commodities have been declared if in strict mode -- journalFinalise :: InputOpts -> FilePath -> Text -> ParsedJournal -> ExceptT String IO Journal journalFinalise iopts@InputOpts{..} f txt pj = do t <- liftIO getPOSIXTime liftEither $ do j <- pj{jglobalcommoditystyles=fromMaybe mempty $ commodity_styles_ balancingopts_} & journalSetLastReadTime t -- save the last read time & journalAddFile (f, txt) -- save the main file's info & journalReverse -- convert all lists to the order they were parsed & journalAddAccountTypes -- build a map of all known account types & journalApplyCommodityStyles -- Infer and apply commodity styles - should be done early <&> journalAddForecast (verbose_tags_) (forecastPeriod iopts pj) -- Add forecast transactions if enabled <&> journalPostingsAddAccountTags -- Add account tags to postings, so they can be matched by auto postings. >>= (if auto_ && not (null $ jtxnmodifiers pj) then journalAddAutoPostings verbose_tags_ _ioDay balancingopts_ -- Add auto postings if enabled, and account tags if needed else pure) -- XXX how to force debug output here ? -- >>= Right . dbg0With (concatMap (T.unpack.showTransaction).jtxns) -- >>= \j -> deepseq (concatMap (T.unpack.showTransaction).jtxns $ j) (return j) >>= journalMarkRedundantCosts -- Mark redundant costs, to help journalBalanceTransactions ignore them >>= journalBalanceTransactions balancingopts_ -- Balance all transactions and maybe check balance assertions. >>= (if infer_costs_ then journalInferCostsFromEquity else pure) -- Maybe infer costs from equity postings where possible <&> (if infer_equity_ then journalAddInferredEquityPostings verbose_tags_ else id) -- Maybe infer equity postings from costs where possible <&> journalInferMarketPricesFromTransactions -- infer market prices from commodity-exchanging transactions <&> traceOrLogAt 6 ("journalFinalise: " <> takeFileName f) -- debug logging <&> dbgJournalAcctDeclOrder ("journalFinalise: " <> takeFileName f <> " acct decls : ") <&> journalRenumberAccountDeclarations <&> dbgJournalAcctDeclOrder ("journalFinalise: " <> takeFileName f <> " acct decls renumbered: ") when strict_ $ do journalCheckAccounts j -- If in strict mode, check all postings are to declared accounts journalCheckCommodities j -- and using declared commodities -- journalCheckPairedConversionPostings j -- check conversion postings are in adjacent pairs -- disabled for now, single conversion postings are sometimes needed eg with paypal return j -- | Apply any auto posting rules to generate extra postings on this journal's transactions. 
-- With a true first argument, adds visible tags to generated postings and modified transactions. journalAddAutoPostings :: Bool -> Day -> BalancingOpts -> Journal -> Either String Journal journalAddAutoPostings verbosetags d bopts = -- Balance all transactions without checking balance assertions, journalBalanceTransactions bopts{ignore_assertions_=True} -- then add the auto postings -- (Note adding auto postings after balancing means #893b fails; -- adding them before balancing probably means #893a, #928, #938 fail.) >=> journalModifyTransactions verbosetags d -- | Generate periodic transactions from all periodic transaction rules in the journal. -- These transactions are added to the in-memory Journal (but not the on-disk file). -- -- The start & end date for generated periodic transactions are determined in -- a somewhat complicated way; see the hledger manual -> Periodic transactions. journalAddForecast :: Bool -> Maybe DateSpan -> Journal -> Journal journalAddForecast _ Nothing j = j journalAddForecast verbosetags (Just forecastspan) j = j{jtxns = jtxns j ++ forecasttxns} where forecasttxns = map (txnTieKnot . transactionTransformPostings (postingApplyCommodityStyles $ journalCommodityStyles j)) . filter (spanContainsDate forecastspan . tdate) . concatMap (\pt -> runPeriodicTransaction verbosetags pt forecastspan) $ jperiodictxns j setYear :: Year -> JournalParser m () setYear y = modify' (\j -> j{jparsedefaultyear=Just y}) getYear :: JournalParser m (Maybe Year) getYear = fmap jparsedefaultyear get -- | Get the decimal mark that has been specified for parsing, if any -- (eg by the CSV decimal-mark rule, or possibly a future journal directive). -- Return it as an AmountStyle that amount parsers can use. getDecimalMarkStyle :: JournalParser m (Maybe AmountStyle) getDecimalMarkStyle = do Journal{jparsedecimalmark} <- get let mdecmarkStyle = (\c -> Just $ amountstyle{asdecimalpoint=Just c}) =<< jparsedecimalmark return mdecmarkStyle setDefaultCommodityAndStyle :: (CommoditySymbol,AmountStyle) -> JournalParser m () setDefaultCommodityAndStyle cs = modify' (\j -> j{jparsedefaultcommodity=Just cs}) getDefaultCommodityAndStyle :: JournalParser m (Maybe (CommoditySymbol,AmountStyle)) getDefaultCommodityAndStyle = jparsedefaultcommodity `fmap` get -- | Get amount style associated with default currency. -- -- Returns 'AmountStyle' used to defined by a latest default commodity directive -- prior to current position within this file or its parents. getDefaultAmountStyle :: JournalParser m (Maybe AmountStyle) getDefaultAmountStyle = fmap snd <$> getDefaultCommodityAndStyle -- | Get the 'AmountStyle' declared by the most recently parsed (in the current or parent files, -- prior to the current position) commodity directive for the given commodity, if any. 
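-- A rough illustration of the lookup order (assumed example, not a doctest):
--
--   commodity 1000.00 EUR   ; a commodity directive declaring EUR's style
--   D $1000.00              ; a default commodity directive declaring $'s style
--
-- After parsing the above, getAmountStyle "EUR" would return the style from the
-- commodity directive, while a commodity with no directive of its own would fall
-- back to the default commodity's style (or Nothing if there is no D directive).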
getAmountStyle :: CommoditySymbol -> JournalParser m (Maybe AmountStyle) getAmountStyle commodity = do Journal{jcommodities} <- get let mspecificStyle = M.lookup commodity jcommodities >>= cformat mdefaultStyle <- fmap snd <$> getDefaultCommodityAndStyle return $ listToMaybe $ catMaybes [mspecificStyle, mdefaultStyle] addDeclaredAccountTags :: AccountName -> [Tag] -> JournalParser m () addDeclaredAccountTags acct atags = modify' (\j -> j{jdeclaredaccounttags = M.insertWith (flip union) acct atags (jdeclaredaccounttags j)}) addDeclaredAccountType :: AccountName -> AccountType -> JournalParser m () addDeclaredAccountType acct atype = modify' (\j -> j{jdeclaredaccounttypes = M.insertWith (++) atype [acct] (jdeclaredaccounttypes j)}) pushParentAccount :: AccountName -> JournalParser m () pushParentAccount acct = modify' (\j -> j{jparseparentaccounts = acct : jparseparentaccounts j}) popParentAccount :: JournalParser m () popParentAccount = do j <- get case jparseparentaccounts j of [] -> unexpected (Tokens ('E' :| "nd of apply account block with no beginning")) (_:rest) -> put j{jparseparentaccounts=rest} getParentAccount :: JournalParser m AccountName getParentAccount = fmap (concatAccountNames . reverse . jparseparentaccounts) get addAccountAlias :: MonadState Journal m => AccountAlias -> m () addAccountAlias a = modify' (\(j@Journal{..}) -> j{jparsealiases=a:jparsealiases}) getAccountAliases :: MonadState Journal m => m [AccountAlias] getAccountAliases = fmap jparsealiases get clearAccountAliases :: MonadState Journal m => m () clearAccountAliases = modify' (\j -> j{jparsealiases=[]}) -- getTransactionCount :: MonadState Journal m => m Integer -- getTransactionCount = fmap jparsetransactioncount get -- -- setTransactionCount :: MonadState Journal m => Integer -> m () -- setTransactionCount i = modify' (\j -> j{jparsetransactioncount=i}) -- -- -- | Increment the transaction index by one and return the new value. -- incrementTransactionCount :: MonadState Journal m => m Integer -- incrementTransactionCount = do -- modify' (\j -> j{jparsetransactioncount=jparsetransactioncount j + 1}) -- getTransactionCount journalAddFile :: (FilePath,Text) -> Journal -> Journal journalAddFile f j@Journal{jfiles=fs} = j{jfiles=fs++[f]} -- append, unlike the other fields, even though we do a final reverse, -- to compensate for additional reversal due to including/monoid-concatting -- A version of `match` that is strict in the returned text match' :: TextParser m a -> TextParser m (Text, a) match' p = do (!txt, p') <- match p pure (txt, p') --- ** parsers --- *** transaction bits statusp :: TextParser m Status statusp = choice' [ skipNonNewlineSpaces >> char '*' >> return Cleared , skipNonNewlineSpaces >> char '!' >> return Pending , return Unmarked ] codep :: TextParser m Text codep = option "" $ do try $ do skipNonNewlineSpaces1 char '(' code <- takeWhileP Nothing $ \c -> c /= ')' && c /= '\n' char ')' "closing bracket ')' for transaction code" pure code -- | Parse possibly empty text until a semicolon or newline. -- Whitespace is preserved (for now - perhaps helps preserve alignment -- of same-line comments ?). descriptionp :: TextParser m Text descriptionp = noncommenttextp "description" --- *** dates -- | Parse a date in YYYY-MM-DD format. -- Slash (/) and period (.) are also allowed as separators. -- The year may be omitted if a default year has been set. -- Leading zeroes may be omitted. 
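-- Some illustrative inputs (assumed behaviour per the comment above; not doctests):
--
--   rtp (datep' Nothing)     "2023-03-05"   -- full date: ok
--   rtp (datep' Nothing)     "2023.3.5"     -- period separators, no leading zeroes: ok
--   rtp (datep' (Just 2023)) "3/5"          -- partial date completed with the default year: ok
--   rtp (datep' Nothing)     "3/5"          -- fails: partial date and no default year
--   rtp (datep' Nothing)     "2023-03/05"   -- fails: mixed separators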
datep :: JournalParser m Day datep = do mYear <- getYear lift $ datep' mYear datep' :: Maybe Year -> TextParser m Day datep' mYear = do startOffset <- getOffset d1 <- yearorintp "year or month" sep <- datesepchar "date separator" d2 <- decimal "month or day" case d1 of Left y -> fullDate startOffset y sep d2 Right m -> partialDate startOffset mYear m sep d2 "full or partial date" where fullDate :: Int -> Year -> Char -> Month -> TextParser m Day fullDate startOffset year sep1 month = do sep2 <- satisfy isDateSepChar "date separator" day <- decimal "day" endOffset <- getOffset let dateStr = show year ++ [sep1] ++ show month ++ [sep2] ++ show day when (sep1 /= sep2) $ customFailure $ parseErrorAtRegion startOffset endOffset $ "This date is malformed because the separators are different.\n" ++"Please use consistent separators." case fromGregorianValid year month day of Nothing -> customFailure $ parseErrorAtRegion startOffset endOffset $ "This date is invalid, please correct it: " ++ dateStr Just date -> pure $! date partialDate :: Int -> Maybe Year -> Month -> Char -> MonthDay -> TextParser m Day partialDate startOffset myr month sep day = do endOffset <- getOffset case myr of Just year -> case fromGregorianValid year month day of Nothing -> customFailure $ parseErrorAtRegion startOffset endOffset $ "This date is invalid, please correct it: " ++ dateStr Just date -> pure $! date where dateStr = show year ++ [sep] ++ show month ++ [sep] ++ show day Nothing -> customFailure $ parseErrorAtRegion startOffset endOffset $ "The partial date "++dateStr++" can not be parsed because the current year is unknown.\n" ++"Consider making it a full date, or add a default year directive.\n" where dateStr = show month ++ [sep] ++ show day {-# INLINABLE datep' #-} -- | Parse a date and time in YYYY-MM-DD HH:MM[:SS][+-ZZZZ] format. -- Slash (/) and period (.) are also allowed as date separators. -- The year may be omitted if a default year has been set. -- Seconds are optional. -- The timezone is optional and ignored (the time is always interpreted as a local time). -- Leading zeroes may be omitted (except in a timezone). 
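-- Illustrative inputs (assumed behaviour per the comment above; not doctests):
--
--   rtp (datetimep' Nothing) "2023-03-05 08:00"        -- seconds omitted: ok
--   rtp (datetimep' Nothing) "2023/3/5 08:00:01+0100"  -- timezone parsed but ignored: ok
--   rtp (datetimep' Nothing) "2023-03-05 24:00"        -- fails: bad hour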
datetimep :: JournalParser m LocalTime datetimep = do mYear <- getYear lift $ datetimep' mYear datetimep' :: Maybe Year -> TextParser m LocalTime datetimep' mYear = do day <- datep' mYear skipNonNewlineSpaces1 time <- timeOfDay optional timeZone -- ignoring time zones pure $ LocalTime day time where timeOfDay :: TextParser m TimeOfDay timeOfDay = do off1 <- getOffset h' <- twoDigitDecimal "hour" off2 <- getOffset unless (h' >= 0 && h' <= 23) $ customFailure $ parseErrorAtRegion off1 off2 "invalid time (bad hour)" char ':' "':' (hour-minute separator)" off3 <- getOffset m' <- twoDigitDecimal "minute" off4 <- getOffset unless (m' >= 0 && m' <= 59) $ customFailure $ parseErrorAtRegion off3 off4 "invalid time (bad minute)" s' <- option 0 $ do char ':' "':' (minute-second separator)" off5 <- getOffset s' <- twoDigitDecimal "second" off6 <- getOffset unless (s' >= 0 && s' <= 59) $ customFailure $ parseErrorAtRegion off5 off6 "invalid time (bad second)" -- we do not support leap seconds pure s' pure $ TimeOfDay h' m' (fromIntegral s') twoDigitDecimal :: TextParser m Int twoDigitDecimal = do d1 <- digitToInt <$> digitChar d2 <- digitToInt <$> (digitChar "a second digit") pure $ d1*10 + d2 timeZone :: TextParser m String timeZone = do plusminus <- satisfy $ \c -> c == '-' || c == '+' fourDigits <- count 4 (digitChar "a digit (for a time zone)") pure $ plusminus:fourDigits secondarydatep :: Day -> TextParser m Day secondarydatep primaryDate = char '=' *> datep' (Just primaryYear) where primaryYear = first3 $ toGregorian primaryDate -- | Parse a year number or an Int. Years must contain at least four -- digits. yearorintp :: TextParser m (Either Year Int) yearorintp = do yearOrMonth <- takeWhile1P (Just "digit") isDigit let n = readDecimal yearOrMonth return $ if T.length yearOrMonth >= 4 then Left n else Right (fromInteger n) --- *** account names -- | Parse an account name (plus one following space if present), -- then apply any parent account prefix and/or account aliases currently in effect, -- in that order. (Ie first add the parent account prefix, then rewrite with aliases). -- This calls error if any account alias with an invalid regular expression exists. modifiedaccountnamep :: JournalParser m AccountName modifiedaccountnamep = do parent <- getParentAccount als <- getAccountAliases -- off1 <- getOffset a <- lift accountnamep -- off2 <- getOffset -- XXX or accountNameApplyAliasesMemo ? doesn't seem to make a difference (retest that function) case accountNameApplyAliases als $ joinAccountNames parent a of Right a' -> return $! a' -- should not happen, regexaliasp will have displayed a better error already: -- (XXX why does customFailure cause error to be displayed there, but not here ?) -- Left e -> customFailure $! parseErrorAtRegion off1 off2 err Left e -> error' err -- PARTIAL: where err = "problem in account alias applied to "++T.unpack a++": "++e -- | Parse an account name, plus one following space if present. -- Account names have one or more parts separated by the account separator character, -- and are terminated by two or more spaces (or end of input). -- Each part is at least one character long, may have single spaces inside it, -- and starts with a non-whitespace. -- Note, this means "{account}", "%^!" and ";comment" are all accepted -- (parent parsers usually prevent/consume the last). -- It should have required parts to start with an alphanumeric; -- for now it remains as-is for backwards compatibility. 
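-- Illustrative inputs (assumed behaviour per the comment above; not doctests):
--
--   rtp accountnamep "assets:bank:checking"          -- ok
--   rtp accountnamep "expenses:food and drink  ; x"  -- stops before the two spaces,
--                                                    -- returning "expenses:food and drink"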
accountnamep :: TextParser m AccountName accountnamep = singlespacedtext1p -- | Parse possibly empty text, including whitespace, -- until a comment start (semicolon) or newline. noncommenttextp :: TextParser m T.Text noncommenttextp = takeWhileP Nothing (\c -> not $ isSameLineCommentStart c || isNewline c) -- | Parse non-empty text, including whitespace, -- until a comment start (semicolon) or newline. noncommenttext1p :: TextParser m T.Text noncommenttext1p = takeWhile1P Nothing (\c -> not $ isSameLineCommentStart c || isNewline c) -- | Parse non-empty, single-spaced text starting and ending with non-whitespace, -- until a double space or newline. singlespacedtext1p :: TextParser m T.Text singlespacedtext1p = singlespacedtextsatisfying1p (const True) -- | Parse non-empty, single-spaced text starting and ending with non-whitespace, -- until a comment start (semicolon), double space, or newline. singlespacednoncommenttext1p :: TextParser m T.Text singlespacednoncommenttext1p = singlespacedtextsatisfying1p (not . isSameLineCommentStart) -- | Parse non-empty, single-spaced text starting and ending with non-whitespace, -- where all characters satisfy the given predicate. singlespacedtextsatisfying1p :: (Char -> Bool) -> TextParser m T.Text singlespacedtextsatisfying1p f = do firstPart <- partp otherParts <- many $ try $ singlespacep *> partp pure $! T.unwords $ firstPart : otherParts where partp = takeWhile1P Nothing (\c -> f c && not (isSpace c)) -- | Parse one non-newline whitespace character that is not followed by another one. singlespacep :: TextParser m () singlespacep = spacenonewline *> notFollowedBy spacenonewline --- *** amounts -- | Parse whitespace then an amount, or return the special "missing" marker amount. spaceandamountormissingp :: JournalParser m MixedAmount spaceandamountormissingp = option missingmixedamt $ try $ do lift $ skipNonNewlineSpaces1 mixedAmount <$> amountp -- | Parse a single-commodity amount, applying the default commodity if there is no commodity symbol; -- optionally followed by, in any order: -- a Ledger-style cost, Ledger-style valuation expression, and/or Ledger-style cost basis, which is one or more of -- lot cost, lot date, and/or lot note (we loosely call this triple the lot's cost basis). -- The cost basis makes it a lot rather than just an amount. Both cost basis info and valuation expression -- are discarded for now. -- The main amount's sign is significant; here are the possibilities and their interpretation. -- Also imagine an optional VALUATIONEXPR added to any of these (omitted for clarity): -- @ -- -- AMT -- acquiring an amount -- AMT COST -- acquiring an amount at some cost -- AMT COST COSTBASIS -- acquiring a lot at some cost, saving its cost basis -- AMT COSTBASIS COST -- like the above -- AMT COSTBASIS -- like the above with cost same as the cost basis -- -- -AMT -- releasing an amount -- -AMT SELLPRICE -- releasing an amount at some selling price -- -AMT SELLPRICE COSTBASISSEL -- releasing a lot at some selling price, selecting it by its cost basis -- -AMT COSTBASISSEL SELLPRICE -- like the above -- -AMT COSTBASISSEL -- like the above with selling price same as the selected lot's cost basis amount -- -- COST/SELLPRICE can be @ UNITAMT, @@ TOTALAMT, (@) UNITAMT, or (@@) TOTALAMT. The () are ignored. -- COSTBASIS is one or more of {LOTCOST}, [LOTDATE], (LOTNOTE), in any order, with LOTCOST defaulting to COST. -- COSTBASISSEL is one or more of {LOTCOST}, [LOTDATE], (LOTNOTE), in any order. 
-- {LOTCOST} can be {UNITAMT}, {{TOTALAMT}}, {=UNITAMT}, or {{=TOTALAMT}}. The = is ignored. -- VALUATIONEXPR can be ((VALUE AMOUNT)) or ((VALUE FUNCTION)). -- -- @ -- Ledger amount syntax is really complex. -- Rule of thumb: curly braces, parentheses, and/or square brackets -- in an amount means a Ledger-style cost basis is involved. -- -- To parse an amount's numeric quantity we need to know which character -- represents a decimal mark. We find it in one of three ways: -- -- 1. If a decimal mark has been set explicitly in the journal parse state, -- we use that -- -- 2. Or if the journal has a commodity declaration for the amount's commodity, -- we get the decimal mark from that -- -- 3. Otherwise we will parse any valid decimal mark appearing in the -- number, as long as the number appears well formed. -- (This means we handle files with any supported decimal mark without configuration, -- but it also allows different decimal marks in different amounts, -- which is a bit too loose. There's an open issue.) -- amountp :: JournalParser m Amount amountp = amountp' False -- An amount with optional cost, valuation, and/or cost basis, as described above. -- A flag indicates whether we are parsing a multiplier amount; -- if not, a commodity-less amount will have the default commodity applied to it. amountp' :: Bool -> JournalParser m Amount amountp' mult = -- dbg "amountp'" $ label "amount" $ do let spaces = lift $ skipNonNewlineSpaces amt <- simpleamountp mult <* spaces (mcost, _valuationexpr, _mlotcost, _mlotdate, _mlotnote) <- runPermutation $ -- costp, valuationexprp, lotnotep all parse things beginning with parenthesis, try needed (,,,,) <$> toPermutationWithDefault Nothing (Just <$> try (costp amt) <* spaces) <*> toPermutationWithDefault Nothing (Just <$> valuationexprp <* spaces) -- XXX no try needed here ? <*> toPermutationWithDefault Nothing (Just <$> lotcostp <* spaces) <*> toPermutationWithDefault Nothing (Just <$> lotdatep <* spaces) <*> toPermutationWithDefault Nothing (Just <$> lotnotep <* spaces) pure $ amt { aprice = mcost } -- An amount with optional cost, but no cost basis. amountnobasisp :: JournalParser m Amount amountnobasisp = -- dbg "amountnobasisp" $ label "amount" $ do let spaces = lift $ skipNonNewlineSpaces amt <- simpleamountp False spaces mprice <- optional $ costp amt <* spaces pure $ amt { aprice = mprice } -- An amount with no cost or cost basis. -- A flag indicates whether we are parsing a multiplier amount; -- if not, a commodity-less amount will have the default commodity applied to it. simpleamountp :: Bool -> JournalParser m Amount simpleamountp mult = -- dbg "simpleamountp" $ do sign <- lift signp leftsymbolamountp sign <|> rightornosymbolamountp sign where -- An amount with commodity symbol on the left. leftsymbolamountp :: (Decimal -> Decimal) -> JournalParser m Amount leftsymbolamountp sign = label "amount" $ do c <- lift commoditysymbolp mdecmarkStyle <- getDecimalMarkStyle mcommodityStyle <- getAmountStyle c -- XXX amounts of this commodity in periodic transaction rules and auto posting rules ? 
#1461 let suggestedStyle = mdecmarkStyle <|> mcommodityStyle commodityspaced <- lift skipNonNewlineSpaces' sign2 <- lift $ signp offBeforeNum <- getOffset ambiguousRawNum <- lift rawnumberp mExponent <- lift $ optional $ try exponentp offAfterNum <- getOffset let numRegion = (offBeforeNum, offAfterNum) (q,prec,mdec,mgrps) <- lift $ interpretNumber numRegion suggestedStyle ambiguousRawNum mExponent let s = amountstyle{ascommodityside=L, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} return nullamt{acommodity=c, aquantity=sign (sign2 q), astyle=s, aprice=Nothing} -- An amount with commodity symbol on the right or no commodity symbol. -- A no-symbol amount will have the default commodity applied to it -- unless we are parsing a multiplier amount (*AMT). rightornosymbolamountp :: (Decimal -> Decimal) -> JournalParser m Amount rightornosymbolamountp sign = label "amount" $ do offBeforeNum <- getOffset ambiguousRawNum <- lift rawnumberp mExponent <- lift $ optional $ try exponentp offAfterNum <- getOffset let numRegion = (offBeforeNum, offAfterNum) mSpaceAndCommodity <- lift $ optional $ try $ (,) <$> skipNonNewlineSpaces' <*> commoditysymbolp case mSpaceAndCommodity of -- right symbol amount Just (commodityspaced, c) -> do mdecmarkStyle <- getDecimalMarkStyle mcommodityStyle <- getAmountStyle c -- XXX amounts of this commodity in periodic transaction rules and auto posting rules ? #1461 let msuggestedStyle = mdecmarkStyle <|> mcommodityStyle (q,prec,mdec,mgrps) <- lift $ interpretNumber numRegion msuggestedStyle ambiguousRawNum mExponent let s = amountstyle{ascommodityside=R, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps} return nullamt{acommodity=c, aquantity=sign q, astyle=s, aprice=Nothing} -- no symbol amount Nothing -> do -- look for a number style to use when parsing, based on -- these things we've already parsed, in this order of preference: mdecmarkStyle <- getDecimalMarkStyle -- a decimal-mark CSV rule mcommodityStyle <- getAmountStyle "" -- a commodity directive for the no-symbol commodity mdefaultStyle <- getDefaultAmountStyle -- a D default commodity directive -- XXX no-symbol amounts in periodic transaction rules and auto posting rules ? #1461 let msuggestedStyle = mdecmarkStyle <|> mcommodityStyle <|> mdefaultStyle (q,prec,mdec,mgrps) <- lift $ interpretNumber numRegion msuggestedStyle ambiguousRawNum mExponent -- if a default commodity has been set, apply it and its style to this amount -- (unless it's a multiplier in an automated posting) defcs <- getDefaultCommodityAndStyle let (c,s) = case (mult, defcs) of (False, Just (defc,defs)) -> (defc, defs{asprecision=max (asprecision defs) prec}) _ -> ("", amountstyle{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}) return nullamt{acommodity=c, aquantity=sign q, astyle=s, aprice=Nothing} -- For reducing code duplication. Doesn't parse anything. Has the type -- of a parser only in order to throw parse errors (for convenience). 
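-- Rough sketch of how an ambiguous literal is resolved here (illustrative, not a doctest):
-- "1,000" parses as an AmbiguousNumber. With a suggested style whose decimal mark is '.',
-- it is interpreted as 1000 (the comma is a digit group mark); with a suggested style
-- whose decimal mark is ',', it is interpreted as 1.000 (three decimal places); with no
-- suggested style, the separator is assumed to be a decimal mark.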
interpretNumber :: (Int, Int) -- offsets -> Maybe AmountStyle -> Either AmbiguousNumber RawNumber -> Maybe Integer -> TextParser m (Quantity, AmountPrecision, Maybe Char, Maybe DigitGroupStyle) interpretNumber posRegion msuggestedStyle ambiguousNum mExp = let rawNum = either (disambiguateNumber msuggestedStyle) id ambiguousNum in case fromRawNumber rawNum mExp of Left errMsg -> customFailure $ uncurry parseErrorAtRegion posRegion errMsg Right (q,p,d,g) -> pure (q, Precision p, d, g) -- | Try to parse a single-commodity amount from a string parseamount :: String -> Either HledgerParseErrors Amount parseamount s = runParser (evalStateT (amountp <* eof) nulljournal) "" (T.pack s) -- | Parse a single-commodity amount from a string, or get an error. parseamount' :: String -> Amount parseamount' s = case parseamount s of Right amt -> amt Left err -> error' $ show err -- PARTIAL: XXX should throwError -- | Like parseamount', but returns a MixedAmount. parsemixedamount :: String -> Either HledgerParseErrors MixedAmount parsemixedamount = fmap mixedAmount . parseamount -- | Like parseamount', but returns a MixedAmount. parsemixedamount' :: String -> MixedAmount parsemixedamount' = mixedAmount . parseamount' -- | Parse a minus or plus sign followed by zero or more spaces, -- or nothing, returning a function that negates or does nothing. signp :: Num a => TextParser m (a -> a) signp = ((char '-' $> negate <|> char '+' $> id) <* skipNonNewlineSpaces) <|> pure id commoditysymbolp :: TextParser m CommoditySymbol commoditysymbolp = quotedcommoditysymbolp <|> simplecommoditysymbolp "commodity symbol" quotedcommoditysymbolp :: TextParser m CommoditySymbol quotedcommoditysymbolp = between (char '"') (char '"') $ takeWhileP Nothing f where f c = c /= ';' && c /= '\n' && c /= '\"' simplecommoditysymbolp :: TextParser m CommoditySymbol simplecommoditysymbolp = takeWhile1P Nothing (not . isNonsimpleCommodityChar) -- | Ledger-style cost notation: -- @ UNITAMT, @@ TOTALAMT, (@) UNITAMT, or (@@) TOTALAMT. The () are ignored. costp :: Amount -> JournalParser m AmountPrice costp baseAmt = -- dbg "costp" $ label "transaction price" $ do -- https://www.ledger-cli.org/3.0/doc/ledger3.html#Virtual-posting-costs parenthesised <- option False $ char '(' >> pure True char '@' totalPrice <- char '@' $> True <|> pure False when parenthesised $ void $ char ')' lift skipNonNewlineSpaces priceAmount <- simpleamountp False -- "unpriced amount (specifying a price)" let amtsign' = signum $ aquantity baseAmt amtsign = if amtsign' == 0 then 1 else amtsign' pure $ if totalPrice then TotalPrice priceAmount{aquantity=amtsign * aquantity priceAmount} else UnitPrice priceAmount -- | A valuation function or value can be written in double parentheses after an amount. valuationexprp :: JournalParser m () valuationexprp = -- dbg "valuationexprp" $ label "valuation expression" $ do string "((" _ <- T.strip . T.pack <$> (many $ noneOf [')','\n']) -- XXX other line endings ? string "))" return () balanceassertionp :: JournalParser m BalanceAssertion balanceassertionp = do sourcepos <- getSourcePos char '=' istotal <- fmap isJust $ optional $ try $ char '=' isinclusive <- fmap isJust $ optional $ try $ char '*' lift skipNonNewlineSpaces -- this amount can have a cost, but not a cost basis. 
-- balance assertions ignore it, but balance assignments will use it a <- amountnobasisp "amount (for a balance assertion or assignment)" return BalanceAssertion { baamount = a , batotal = istotal , bainclusive = isinclusive , baposition = sourcepos } -- Parse a Ledger-style lot cost, -- {UNITCOST} or {{TOTALCOST}} or {=FIXEDUNITCOST} or {{=FIXEDTOTALCOST}}, -- and discard it. lotcostp :: JournalParser m () lotcostp = -- dbg "lotcostp" $ label "ledger-style lot cost" $ do char '{' doublebrace <- option False $ char '{' >> pure True _fixed <- fmap isJust $ optional $ lift skipNonNewlineSpaces >> char '=' lift skipNonNewlineSpaces _a <- simpleamountp False lift skipNonNewlineSpaces char '}' when (doublebrace) $ void $ char '}' -- Parse a Ledger-style [LOTDATE], and discard it. lotdatep :: JournalParser m () lotdatep = -- dbg "lotdatep" $ label "ledger-style lot date" $ do char '[' lift skipNonNewlineSpaces _d <- datep lift skipNonNewlineSpaces char ']' return () -- Parse a Ledger-style (LOT NOTE), and discard it. lotnotep :: JournalParser m () lotnotep = -- dbg "lotnotep" $ label "ledger-style lot note" $ do char '(' lift skipNonNewlineSpaces _note <- stripEnd . T.pack <$> (many $ noneOf [')','\n']) -- XXX other line endings ? char ')' return () -- | Parse a string representation of a number for its value and display -- attributes. -- -- Some international number formats are accepted, eg either period or comma -- may be used for the decimal mark, and the other of these may be used for -- separating digit groups in the integer part. See -- http://en.wikipedia.org/wiki/Decimal_separator for more examples. -- -- This returns: the parsed numeric value, the precision (number of digits -- seen following the decimal mark), the decimal mark character used if any, -- and the digit group style if any. -- numberp :: Maybe AmountStyle -> TextParser m (Quantity, Word8, Maybe Char, Maybe DigitGroupStyle) numberp suggestedStyle = label "number" $ do -- a number is an optional sign followed by a sequence of digits possibly -- interspersed with periods, commas, or both -- dbgparse 0 "numberp" sign <- signp rawNum <- either (disambiguateNumber suggestedStyle) id <$> rawnumberp mExp <- optional $ try $ exponentp dbg7 "numberp suggestedStyle" suggestedStyle `seq` return () case dbg7 "numberp quantity,precision,mdecimalpoint,mgrps" $ fromRawNumber rawNum mExp of Left errMsg -> Fail.fail errMsg Right (q, p, d, g) -> pure (sign q, p, d, g) exponentp :: TextParser m Integer exponentp = char' 'e' *> signp <*> decimal "exponent" -- | Interpret a raw number as a decimal number. 
-- -- Returns: -- - the decimal number -- - the precision (number of digits after the decimal point) -- - the decimal point character, if any -- - the digit group style, if any (digit group character and sizes of digit groups) fromRawNumber :: RawNumber -> Maybe Integer -> Either String (Quantity, Word8, Maybe Char, Maybe DigitGroupStyle) fromRawNumber (WithSeparators{}) (Just _) = Left "invalid number: digit separators and exponents may not be used together" fromRawNumber raw mExp = do (quantity, precision) <- toQuantity (fromMaybe 0 mExp) (digitGroup raw) (decimalGroup raw) return (quantity, precision, mDecPt raw, digitGroupStyle raw) where toQuantity :: Integer -> DigitGrp -> DigitGrp -> Either String (Quantity, Word8) toQuantity e preDecimalGrp postDecimalGrp | precision < 0 = Right (Decimal 0 (digitGrpNum * 10^(-precision)), 0) | precision < 256 = Right (Decimal precision8 digitGrpNum, precision8) | otherwise = Left "invalid number: numbers with more than 255 decimal places are currently not supported" where digitGrpNum = digitGroupNumber $ preDecimalGrp <> postDecimalGrp precision = toInteger (digitGroupLength postDecimalGrp) - e precision8 = fromIntegral precision :: Word8 mDecPt (NoSeparators _ mDecimals) = fst <$> mDecimals mDecPt (WithSeparators _ _ mDecimals) = fst <$> mDecimals decimalGroup (NoSeparators _ mDecimals) = maybe mempty snd mDecimals decimalGroup (WithSeparators _ _ mDecimals) = maybe mempty snd mDecimals digitGroup (NoSeparators digitGrp _) = digitGrp digitGroup (WithSeparators _ digitGrps _) = mconcat digitGrps digitGroupStyle (NoSeparators _ _) = Nothing digitGroupStyle (WithSeparators sep grps _) = Just . DigitGroups sep $ groupSizes grps -- Outputs digit group sizes from least significant to most significant groupSizes :: [DigitGrp] -> [Word8] groupSizes digitGrps = reverse $ case map (fromIntegral . digitGroupLength) digitGrps of (a:b:cs) | a < b -> b:cs gs -> gs disambiguateNumber :: Maybe AmountStyle -> AmbiguousNumber -> RawNumber disambiguateNumber msuggestedStyle (AmbiguousNumber grp1 sep grp2) = -- If present, use the suggested style to disambiguate; -- otherwise, assume that the separator is a decimal point where possible. if isDecimalMark sep && maybe True (sep `isValidDecimalBy`) msuggestedStyle then NoSeparators grp1 (Just (sep, grp2)) else WithSeparators sep [grp1, grp2] Nothing where isValidDecimalBy :: Char -> AmountStyle -> Bool isValidDecimalBy c = \case AmountStyle{asdecimalpoint = Just d} -> d == c AmountStyle{asdigitgroups = Just (DigitGroups g _)} -> g /= c AmountStyle{asprecision = Precision 0} -> False _ -> True -- | Parse and interpret the structure of a number without external hints. -- Numbers are digit strings, possibly separated into digit groups by one -- of two types of separators. (1) Numbers may optionally have a decimal -- mark, which may be either a period or comma. (2) Numbers may -- optionally contain digit group marks, which must all be either a -- period, a comma, or a space. -- -- It is our task to deduce the characters used as decimal mark and -- digit group mark, based on the allowed syntax. For instance, we -- make use of the fact that a decimal mark can occur at most once and -- must be to the right of all digit group marks. 
-- -- >>> parseTest rawnumberp "1,234,567.89" -- Right (WithSeparators ',' ["1","234","567"] (Just ('.',"89"))) -- >>> parseTest rawnumberp "1,000" -- Left (AmbiguousNumber "1" ',' "000") -- >>> parseTest rawnumberp "1 000" -- Right (WithSeparators ' ' ["1","000"] Nothing) -- rawnumberp :: TextParser m (Either AmbiguousNumber RawNumber) rawnumberp = label "number" $ do rawNumber <- fmap Right leadingDecimalPt <|> leadingDigits -- Guard against mistyped numbers mExtraDecimalSep <- optional $ lookAhead $ satisfy isDecimalMark when (isJust mExtraDecimalSep) $ Fail.fail "invalid number (invalid use of separator)" mExtraFragment <- optional $ lookAhead $ try $ char ' ' *> getOffset <* digitChar case mExtraFragment of Just off -> customFailure $ parseErrorAt off "invalid number (excessive trailing digits)" Nothing -> pure () return $ dbg7 "rawnumberp" rawNumber where leadingDecimalPt :: TextParser m RawNumber leadingDecimalPt = do decPt <- satisfy isDecimalMark decGrp <- digitgroupp pure $ NoSeparators mempty (Just (decPt, decGrp)) leadingDigits :: TextParser m (Either AmbiguousNumber RawNumber) leadingDigits = do grp1 <- digitgroupp withSeparators grp1 <|> fmap Right (trailingDecimalPt grp1) <|> pure (Right $ NoSeparators grp1 Nothing) withSeparators :: DigitGrp -> TextParser m (Either AmbiguousNumber RawNumber) withSeparators grp1 = do (sep, grp2) <- try $ (,) <$> satisfy isDigitSeparatorChar <*> digitgroupp grps <- many $ try $ char sep *> digitgroupp let digitGroups = grp1 : grp2 : grps fmap Right (withDecimalPt sep digitGroups) <|> pure (withoutDecimalPt grp1 sep grp2 grps) withDecimalPt :: Char -> [DigitGrp] -> TextParser m RawNumber withDecimalPt digitSep digitGroups = do decPt <- satisfy $ \c -> isDecimalMark c && c /= digitSep decDigitGrp <- option mempty digitgroupp pure $ WithSeparators digitSep digitGroups (Just (decPt, decDigitGrp)) withoutDecimalPt :: DigitGrp -> Char -> DigitGrp -> [DigitGrp] -> Either AmbiguousNumber RawNumber withoutDecimalPt grp1 sep grp2 grps | null grps && isDecimalMark sep = Left $ AmbiguousNumber grp1 sep grp2 | otherwise = Right $ WithSeparators sep (grp1:grp2:grps) Nothing trailingDecimalPt :: DigitGrp -> TextParser m RawNumber trailingDecimalPt grp1 = do decPt <- satisfy isDecimalMark pure $ NoSeparators grp1 (Just (decPt, mempty)) isDigitSeparatorChar :: Char -> Bool isDigitSeparatorChar c = isDecimalMark c || c == ' ' -- | Some kinds of number literal we might parse. data RawNumber = NoSeparators DigitGrp (Maybe (Char, DigitGrp)) -- ^ A number with no digit group marks (eg 100), -- or with a leading or trailing comma or period -- which (apparently) we interpret as a decimal mark (like 100. or .100) | WithSeparators Char [DigitGrp] (Maybe (Char, DigitGrp)) -- ^ A number with identifiable digit group marks -- (eg 1,000,000 or 1,000.50 or 1 000) deriving (Show, Eq) -- | Another kind of number literal: this one contains either a digit -- group separator or a decimal mark, we're not sure which (eg 1,000 or 100.50). data AmbiguousNumber = AmbiguousNumber DigitGrp Char DigitGrp deriving (Show, Eq) -- | Description of a single digit group in a number literal. -- "Thousands" is one well known digit grouping, but there are others. data DigitGrp = DigitGrp { digitGroupLength :: !Word, -- ^ The number of digits in this group. -- This is Word to avoid the need to do overflow -- checking for the Semigroup instance of DigitGrp. digitGroupNumber :: !Integer -- ^ The natural number formed by this group's digits. This should always be positive. 
} deriving (Eq) -- | A custom show instance, showing digit groups as the parser saw them. instance Show DigitGrp where show (DigitGrp len n) = "\"" ++ padding ++ numStr ++ "\"" where numStr = show n padding = genericReplicate (toInteger len - toInteger (length numStr)) '0' instance Sem.Semigroup DigitGrp where DigitGrp l1 n1 <> DigitGrp l2 n2 = DigitGrp (l1 + l2) (n1 * 10^l2 + n2) instance Monoid DigitGrp where mempty = DigitGrp 0 0 mappend = (Sem.<>) digitgroupp :: TextParser m DigitGrp digitgroupp = label "digits" $ makeGroup <$> takeWhile1P (Just "digit") isDigit where makeGroup = uncurry DigitGrp . T.foldl' step (0, 0) step (!l, !a) c = (l+1, a*10 + fromIntegral (digitToInt c)) --- *** comments multilinecommentp :: TextParser m () multilinecommentp = startComment *> anyLine `skipManyTill` endComment where startComment = string "comment" *> trailingSpaces endComment = eof <|> string "end comment" *> trailingSpaces trailingSpaces = skipNonNewlineSpaces <* newline anyLine = void $ takeWhileP Nothing (/='\n') *> newline {-# INLINABLE multilinecommentp #-} -- | A blank or comment line in journal format: a line that's empty or -- containing only whitespace or whose first non-whitespace character -- is semicolon, hash, or star. emptyorcommentlinep :: TextParser m () emptyorcommentlinep = do skipNonNewlineSpaces skiplinecommentp <|> void newline where skiplinecommentp :: TextParser m () skiplinecommentp = do satisfy isLineCommentStart void $ takeWhileP Nothing (/= '\n') optional newline pure () {-# INLINABLE emptyorcommentlinep #-} -- | Is this a character that, as the first non-whitespace on a line, -- starts a comment line ? isLineCommentStart :: Char -> Bool isLineCommentStart '#' = True isLineCommentStart '*' = True isLineCommentStart ';' = True isLineCommentStart _ = False -- | Is this a character that, appearing anywhere within a line, -- starts a comment ? isSameLineCommentStart :: Char -> Bool isSameLineCommentStart ';' = True isSameLineCommentStart _ = False -- A parser for (possibly multiline) comments following a journal item. -- -- Comments following a journal item begin with a semicolon and extend to -- the end of the line. They may span multiple lines; any comment lines -- not on the same line as the journal item must be indented (preceded by -- leading whitespace). -- -- Like Ledger, we sometimes allow data to be embedded in comments. Eg, -- comments on the account directive and on transactions can contain tags, -- and comments on postings can contain tags and/or bracketed posting dates. -- To handle these variations, this parser takes as parameter a subparser, -- which should consume all input up until the next newline, and which can -- optionally extract some kind of data from it. -- followingcommentp' returns this data along with the full text of the comment. -- -- See followingcommentp for tests. -- followingcommentp' :: (Monoid a, Show a) => TextParser m a -> TextParser m (Text, a) followingcommentp' contentp = do skipNonNewlineSpaces -- there can be 0 or 1 sameLine sameLine <- try headerp *> ((:[]) <$> match' contentp) <|> pure [] _ <- eolof -- there can be 0 or more nextLines nextLines <- many $ try (skipNonNewlineSpaces1 *> headerp) *> match' contentp <* eolof let -- if there's just a next-line comment, insert an empty same-line comment -- so the next-line comment doesn't get rendered as a same-line comment. 
sameLine' | null sameLine && not (null nextLines) = [("",mempty)] | otherwise = sameLine (texts, contents) = unzip $ sameLine' ++ nextLines strippedCommentText = T.unlines $ map T.strip texts commentContent = mconcat contents pure (strippedCommentText, commentContent) where headerp = char ';' *> skipNonNewlineSpaces {-# INLINABLE followingcommentp' #-} -- | Parse the text of a (possibly multiline) comment following a journal item. -- -- >>> rtp followingcommentp "" -- no comment -- Right "" -- >>> rtp followingcommentp ";" -- just a (empty) same-line comment. newline is added -- Right "\n" -- >>> rtp followingcommentp "; \n" -- Right "\n" -- >>> rtp followingcommentp ";\n ;\n" -- a same-line and a next-line comment -- Right "\n\n" -- >>> rtp followingcommentp "\n ;\n" -- just a next-line comment. Insert an empty same-line comment so the next-line comment doesn't become a same-line comment. -- Right "\n\n" -- followingcommentp :: TextParser m Text followingcommentp = fst <$> followingcommentp' (void $ takeWhileP Nothing (/= '\n')) -- XXX support \r\n ? {-# INLINABLE followingcommentp #-} -- | Parse a transaction comment and extract its tags. -- -- The first line of a transaction may be followed by comments, which -- begin with semicolons and extend to the end of the line. Transaction -- comments may span multiple lines, but comment lines below the -- transaction must be preceded by leading whitespace. -- -- 2000/1/1 ; a transaction comment starting on the same line ... -- ; extending to the next line -- account1 $1 -- account2 -- -- Tags are name-value pairs. -- -- >>> let getTags (_,tags) = tags -- >>> let parseTags = fmap getTags . rtp transactioncommentp -- -- >>> parseTags "; name1: val1, name2:all this is value2" -- Right [("name1","val1"),("name2","all this is value2")] -- -- A tag's name must be immediately followed by a colon, without -- separating whitespace. The corresponding value consists of all the text -- following the colon up until the next colon or newline, stripped of -- leading and trailing whitespace. -- transactioncommentp :: TextParser m (Text, [Tag]) transactioncommentp = followingcommentp' commenttagsp {-# INLINABLE transactioncommentp #-} commenttagsp :: TextParser m [Tag] commenttagsp = do tagName <- (last . T.split isSpace) <$> takeWhileP Nothing (\c -> c /= ':' && c /= '\n') atColon tagName <|> pure [] -- if not ':', then either '\n' or EOF where atColon :: Text -> TextParser m [Tag] atColon name = char ':' *> do if T.null name then commenttagsp else do skipNonNewlineSpaces val <- tagValue let tag = (name, val) (tag:) <$> commenttagsp tagValue :: TextParser m Text tagValue = do val <- T.strip <$> takeWhileP Nothing (\c -> c /= ',' && c /= '\n') _ <- optional $ char ',' pure val {-# INLINABLE commenttagsp #-} -- | Parse a posting comment and extract its tags and dates. -- -- Postings may be followed by comments, which begin with semicolons and -- extend to the end of the line. Posting comments may span multiple -- lines, but comment lines below the posting must be preceded by -- leading whitespace. -- -- 2000/1/1 -- account1 $1 ; a posting comment starting on the same line ... -- ; extending to the next line -- -- account2 -- ; a posting comment beginning on the next line -- -- Tags are name-value pairs. -- -- >>> let getTags (_,tags,_,_) = tags -- >>> let parseTags = fmap getTags . 
rtp (postingcommentp Nothing) -- -- >>> parseTags "; name1: val1, name2:all this is value2" -- Right [("name1","val1"),("name2","all this is value2")] -- -- A tag's name must be immediately followed by a colon, without -- separating whitespace. The corresponding value consists of all the text -- following the colon up until the next colon or newline, stripped of -- leading and trailing whitespace. -- -- Posting dates may be expressed with "date"/"date2" tags or with -- bracketed date syntax. Posting dates will inherit their year from the -- transaction date if the year is not specified. We throw parse errors on -- invalid dates. -- -- >>> let getDates (_,_,d1,d2) = (d1, d2) -- >>> let parseDates = fmap getDates . rtp (postingcommentp (Just 2000)) -- -- >>> parseDates "; date: 1/2, date2: 1999/12/31" -- Right (Just 2000-01-02,Just 1999-12-31) -- >>> parseDates "; [1/2=1999/12/31]" -- Right (Just 2000-01-02,Just 1999-12-31) -- -- Example: tags, date tags, and bracketed dates -- >>> rtp (postingcommentp (Just 2000)) "; a:b, date:3/4, [=5/6]" -- Right ("a:b, date:3/4, [=5/6]\n",[("a","b"),("date","3/4")],Just 2000-03-04,Just 2000-05-06) -- -- Example: extraction of dates from date tags ignores trailing text -- >>> rtp (postingcommentp (Just 2000)) "; date:3/4=5/6" -- Right ("date:3/4=5/6\n",[("date","3/4=5/6")],Just 2000-03-04,Nothing) -- postingcommentp :: Maybe Year -> TextParser m (Text, [Tag], Maybe Day, Maybe Day) postingcommentp mYear = do (commentText, (tags, dateTags)) <- followingcommentp' (commenttagsanddatesp mYear) let mdate = snd <$> find ((=="date") .fst) dateTags mdate2 = snd <$> find ((=="date2").fst) dateTags pure (commentText, tags, mdate, mdate2) {-# INLINABLE postingcommentp #-} commenttagsanddatesp :: Maybe Year -> TextParser m ([Tag], [DateTag]) commenttagsanddatesp mYear = do (txt, dateTags) <- match $ readUpTo ':' -- next char is either ':' or '\n' (or EOF) let tagName = last (T.split isSpace txt) (fmap.second) (dateTags++) (atColon tagName) <|> pure ([], dateTags) -- if not ':', then either '\n' or EOF where readUpTo :: Char -> TextParser m [DateTag] readUpTo end = do void $ takeWhileP Nothing (\c -> c /= end && c /= '\n' && c /= '[') -- if not '[' then ':' or '\n' or EOF atBracket (readUpTo end) <|> pure [] atBracket :: TextParser m [DateTag] -> TextParser m [DateTag] atBracket cont = do -- Uses the fact that bracketed date-tags cannot contain newlines dateTags <- option [] $ lookAhead (bracketeddatetagsp mYear) _ <- char '[' dateTags' <- cont pure $ dateTags ++ dateTags' atColon :: Text -> TextParser m ([Tag], [DateTag]) atColon name = char ':' *> do skipNonNewlineSpaces (tags, dateTags) <- case name of "" -> pure ([], []) "date" -> dateValue name "date2" -> dateValue name _ -> tagValue name _ <- optional $ char ',' bimap (tags++) (dateTags++) <$> commenttagsanddatesp mYear dateValue :: Text -> TextParser m ([Tag], [DateTag]) dateValue name = do (txt, (date, dateTags)) <- match' $ do date <- datep' mYear dateTags <- readUpTo ',' pure (date, dateTags) let val = T.strip txt pure $ ( [(name, val)] , (name, date) : dateTags ) tagValue :: Text -> TextParser m ([Tag], [DateTag]) tagValue name = do (txt, dateTags) <- match' $ readUpTo ',' let val = T.strip txt pure $ ( [(name, val)] , dateTags ) {-# INLINABLE commenttagsanddatesp #-} -- | Parse Ledger-style bracketed posting dates ([DATE=DATE2]), as -- "date" and/or "date2" tags. 
Anything that looks like an attempt at -- this (a square-bracketed sequence of 0123456789/-.= containing at -- least one digit and one date separator) is also parsed, and will -- throw an appropriate error. -- -- The dates are parsed in full here so that errors are reported in -- the right position. A missing year in DATE can be inferred if a -- default date is provided. A missing year in DATE2 will be inferred -- from DATE. -- -- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[2016/1/2=3/4]" -- Right [("date",2016-01-02),("date2",2016-03-04)] -- -- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[1]" -- Left ...not a bracketed date... -- -- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[2016/1/32]" -- Left ...1:2:...This date is invalid... -- -- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[1/31]" -- Left ...1:2:...The partial date 1/31 can not be parsed... -- -- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[0123456789/-.=/-.=]" -- Left ...1:13:...expecting month or day... -- bracketeddatetagsp :: Maybe Year -> TextParser m [(TagName, Day)] bracketeddatetagsp mYear1 = do -- dbgparse 0 "bracketeddatetagsp" try $ do s <- lookAhead $ between (char '[') (char ']') $ takeWhile1P Nothing isBracketedDateChar unless (T.any isDigit s && T.any isDateSepChar s) $ Fail.fail "not a bracketed date" -- Looks sufficiently like a bracketed date to commit to parsing a date between (char '[') (char ']') $ do md1 <- optional $ datep' mYear1 let mYear2 = fmap readYear md1 <|> mYear1 md2 <- optional $ char '=' *> datep' mYear2 pure $ catMaybes [("date",) <$> md1, ("date2",) <$> md2] where readYear = first3 . toGregorian isBracketedDateChar c = isDigit c || isDateSepChar c || c == '=' {-# INLINABLE bracketeddatetagsp #-} -- | Get the account name aliases from options, if any. aliasesFromOpts :: InputOpts -> [AccountAlias] aliasesFromOpts = map (\a -> fromparse $ runParser accountaliasp ("--alias "++quoteIfNeeded a) $ T.pack a) . aliases_ accountaliasp :: TextParser m AccountAlias accountaliasp = regexaliasp <|> basicaliasp basicaliasp :: TextParser m AccountAlias basicaliasp = do -- dbgparse 0 "basicaliasp" old <- rstrip <$> (some $ noneOf ("=" :: [Char])) char '=' skipNonNewlineSpaces new <- rstrip <$> anySingle `manyTill` eolof -- eol in journal, eof in command lines, normally return $ BasicAlias (T.pack old) (T.pack new) regexaliasp :: TextParser m AccountAlias regexaliasp = do -- dbgparse 0 "regexaliasp" (off1, off2, re) <- between (char '/') (char '/') $ do off1 <- getOffset re <- fmap T.concat . some $ (T.singleton <$> noneOf ("/\\\n\r" :: [Char])) -- paranoid: don't try to read past line end <|> string "\\/" -- allow escaping forward slashes <|> (liftM2 T.cons (char '\\') (T.singleton <$> anySingle)) -- Otherwise leave backslashes in off2 <- getOffset return (off1, off2, re) skipNonNewlineSpaces char '=' skipNonNewlineSpaces repl <- anySingle `manyTill` eolof case toRegexCI re of Right r -> return $! RegexAlias r repl Left e -> customFailure $! parseErrorAtRegion off1 off2 e --- ** tests tests_Common = testGroup "Common" [ testGroup "amountp" [ testCase "basic" $ assertParseEq amountp "$47.18" (usd 47.18) ,testCase "ends with decimal mark" $ assertParseEq amountp "$1." 
(usd 1 `withPrecision` Precision 0) ,testCase "unit price" $ assertParseEq amountp "$10 @ €0.5" -- not precise enough: -- (usd 10 `withPrecision` 0 `at` (eur 0.5 `withPrecision` 1)) -- `withStyle` asdecimalpoint=Just '.' nullamt{ acommodity="$" ,aquantity=10 -- need to test internal precision with roundTo ? I think not ,astyle=amountstyle{asprecision=Precision 0, asdecimalpoint=Nothing} ,aprice=Just $ UnitPrice $ nullamt{ acommodity="€" ,aquantity=0.5 ,astyle=amountstyle{asprecision=Precision 1, asdecimalpoint=Just '.'} } } ,testCase "total price" $ assertParseEq amountp "$10 @@ €5" nullamt{ acommodity="$" ,aquantity=10 ,astyle=amountstyle{asprecision=Precision 0, asdecimalpoint=Nothing} ,aprice=Just $ TotalPrice $ nullamt{ acommodity="€" ,aquantity=5 ,astyle=amountstyle{asprecision=Precision 0, asdecimalpoint=Nothing} } } ,testCase "unit price, parenthesised" $ assertParse amountp "$10 (@) €0.5" ,testCase "total price, parenthesised" $ assertParse amountp "$10 (@@) €0.5" ] ,let p = lift (numberp Nothing) :: JournalParser IO (Quantity, Word8, Maybe Char, Maybe DigitGroupStyle) in testCase "numberp" $ do assertParseEq p "0" (0, 0, Nothing, Nothing) assertParseEq p "1" (1, 0, Nothing, Nothing) assertParseEq p "1.1" (1.1, 1, Just '.', Nothing) assertParseEq p "1,000.1" (1000.1, 1, Just '.', Just $ DigitGroups ',' [3]) assertParseEq p "1.00.000,1" (100000.1, 1, Just ',', Just $ DigitGroups '.' [3,2]) assertParseEq p "1,000,000" (1000000, 0, Nothing, Just $ DigitGroups ',' [3,3]) -- could be simplified to [3] assertParseEq p "1." (1, 0, Just '.', Nothing) assertParseEq p "1," (1, 0, Just ',', Nothing) assertParseEq p ".1" (0.1, 1, Just '.', Nothing) assertParseEq p ",1" (0.1, 1, Just ',', Nothing) assertParseError p "" "" assertParseError p "1,000.000,1" "" assertParseError p "1.000,000.1" "" assertParseError p "1,000.000.1" "" assertParseError p "1,,1" "" assertParseError p "1..1" "" assertParseError p ".1," "" assertParseError p ",1." "" assertParseEq p "1.555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555" (1.555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555, 255, Just '.', Nothing) assertParseError p "1.5555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555" "" ,testGroup "spaceandamountormissingp" [ testCase "space and amount" $ assertParseEq spaceandamountormissingp " $47.18" (mixedAmount $ usd 47.18) ,testCase "empty string" $ assertParseEq spaceandamountormissingp "" missingmixedamt -- ,testCase "just space" $ assertParseEq spaceandamountormissingp " " missingmixedamt -- XXX should it ? -- ,testCase "just amount" $ assertParseError spaceandamountormissingp "$47.18" "" -- succeeds, consuming nothing ] ] hledger-lib-1.30/Hledger/Read/CsvReader.hs0000644000000000000000000000451014434445206016446 0ustar0000000000000000--- * -*- outline-regexp:"--- \\*"; -*- --- ** doc -- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections. 
{-| A reader for CSV (character-separated) data. This also reads a rules file to help interpret the CSV data. -} --- ** language {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} --- ** exports module Hledger.Read.CsvReader ( -- * Reader reader, -- * Tests tests_CsvReader, ) where --- ** imports import Prelude hiding (Applicative(..)) import Control.Monad.Except (ExceptT(..), liftEither) import Control.Monad.IO.Class (MonadIO) import Data.Text (Text) import Hledger.Data import Hledger.Utils import Hledger.Read.Common (aliasesFromOpts, Reader(..), InputOpts(..), journalFinalise) import Hledger.Read.RulesReader (readJournalFromCsv) --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings --- ** reader reader :: MonadIO m => Reader m reader = Reader {rFormat = "csv" ,rExtensions = ["csv","tsv","ssv"] ,rReadFn = parse ,rParser = error' "sorry, CSV files can't be included yet" -- PARTIAL: } -- | Parse and post-process a "Journal" from CSV data, or give an error. -- This currently ignores the provided data, and reads it from the file path instead. -- This file path is normally the CSV(/SSV/TSV) data file, and a corresponding rules file is inferred. -- But it can also be the rules file, in which case the corresponding data file is inferred. -- This does not check balance assertions. parse :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal parse iopts f t = do let mrulesfile = mrules_file_ iopts readJournalFromCsv (Right <$> mrulesfile) f t -- apply any command line account aliases. Can fail with a bad replacement pattern. >>= liftEither . journalApplyAliases (aliasesFromOpts iopts) -- journalFinalise assumes the journal's items are -- reversed, as produced by JournalReader's parser. -- But here they are already properly ordered. So we'd -- better preemptively reverse them once more. XXX inefficient . journalReverse >>= journalFinalise iopts{balancingopts_=(balancingopts_ iopts){ignore_assertions_=True}} f t --- ** tests tests_CsvReader = testGroup "CsvReader" [ ] hledger-lib-1.30/Hledger/Read/CsvUtils.hs0000644000000000000000000000170714434445206016351 0ustar0000000000000000--- * -*- outline-regexp:"--- \\*"; -*- --- ** doc {-| CSV utilities. -} --- ** language {-# LANGUAGE OverloadedStrings #-} --- ** exports module Hledger.Read.CsvUtils ( CSV, CsvRecord, CsvValue, printCSV, -- * Tests tests_CsvUtils, ) where --- ** imports import Prelude hiding (Applicative(..)) import Data.List (intersperse) import Data.Text (Text) import qualified Data.Text as T import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Hledger.Utils --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings type CSV = [CsvRecord] type CsvRecord = [CsvValue] type CsvValue = Text printCSV :: [CsvRecord] -> TL.Text printCSV = TB.toLazyText . unlinesB . map printRecord where printRecord = foldMap TB.fromText . intersperse "," . map printField printField = wrap "\"" "\"" . T.replace "\"" "\"\"" --- ** tests tests_CsvUtils :: TestTree tests_CsvUtils = testGroup "CsvUtils" [ ] hledger-lib-1.30/Hledger/Read/InputOptions.hs0000644000000000000000000001103714434445206017245 0ustar0000000000000000{-| Various options to use when reading journal files. Similar to CliOptions.inputflags, simplifies the journal-reading functions. -} {-# LANGUAGE TemplateHaskell #-} module Hledger.Read.InputOptions ( -- * Types and helpers for input options InputOpts(..) 
, HasInputOpts(..) , definputopts , forecastPeriod ) where import Control.Applicative ((<|>)) import Data.Time (Day, addDays) import Hledger.Data.Types import Hledger.Data.Journal (journalEndDate) import Hledger.Data.Dates (nulldate, nulldatespan) import Hledger.Data.Balancing (BalancingOpts(..), HasBalancingOpts(..), defbalancingopts) import Hledger.Utils (dbg2, makeHledgerClassyLenses) data InputOpts = InputOpts { -- files_ :: [FilePath] mformat_ :: Maybe StorageFormat -- ^ a file/storage format to try, unless overridden -- by a filename prefix. Nothing means try all. ,mrules_file_ :: Maybe FilePath -- ^ a conversion rules file to use (when reading CSV) ,aliases_ :: [String] -- ^ account name aliases to apply ,anon_ :: Bool -- ^ do light anonymisation/obfuscation of the data ,new_ :: Bool -- ^ read only new transactions since this file was last read ,new_save_ :: Bool -- ^ save latest new transactions state for next time ,pivot_ :: String -- ^ use the given field's value as the account name ,forecast_ :: Maybe DateSpan -- ^ span in which to generate forecast transactions ,verbose_tags_ :: Bool -- ^ add user-visible tags when generating/modifying transactions & postings ? ,reportspan_ :: DateSpan -- ^ a dirty hack keeping the query dates in InputOpts. This rightfully lives in ReportSpec, but is duplicated here. ,auto_ :: Bool -- ^ generate automatic postings when journal is parsed ? ,infer_equity_ :: Bool -- ^ infer equity conversion postings from costs ? ,infer_costs_ :: Bool -- ^ infer costs from equity conversion postings ? distinct from BalancingOpts{infer_balancing_costs_} ,balancingopts_ :: BalancingOpts -- ^ options for balancing transactions ,strict_ :: Bool -- ^ do extra error checking (eg, all posted accounts are declared, no prices are inferred) ,_ioDay :: Day -- ^ today's date, for use with forecast transactions XXX this duplicates _rsDay, and should eventually be removed when it's not needed anymore. } deriving (Show) definputopts :: InputOpts definputopts = InputOpts { mformat_ = Nothing , mrules_file_ = Nothing , aliases_ = [] , anon_ = False , new_ = False , new_save_ = True , pivot_ = "" , forecast_ = Nothing , verbose_tags_ = False , reportspan_ = nulldatespan , auto_ = False , infer_equity_ = False , infer_costs_ = False , balancingopts_ = defbalancingopts , strict_ = False , _ioDay = nulldate } -- | Get the Maybe the DateSpan to generate forecast options from. -- This begins on: -- - the start date supplied to the `--forecast` argument, if present -- - otherwise, the later of -- - the report start date if specified with -b/-p/date: -- - the day after the latest normal (non-periodic) transaction in the journal, if any -- - otherwise today. -- It ends on: -- - the end date supplied to the `--forecast` argument, if present -- - otherwise the report end date if specified with -e/-p/date: -- - otherwise 180 days (6 months) from today. forecastPeriod :: InputOpts -> Journal -> Maybe DateSpan forecastPeriod iopts j = do DateSpan requestedStart requestedEnd <- forecast_ iopts let forecastStart = fromEFDay <$> requestedStart <|> max mjournalend (fromEFDay <$> reportStart) <|> Just (_ioDay iopts) forecastEnd = fromEFDay <$> requestedEnd <|> fromEFDay <$> reportEnd <|> (Just $ addDays 180 $ _ioDay iopts) mjournalend = dbg2 "journalEndDate" $ journalEndDate False j -- ignore secondary dates DateSpan reportStart reportEnd = reportspan_ iopts return . 
dbg2 "forecastspan" $ DateSpan (Exact <$> forecastStart) (Exact <$> forecastEnd) -- ** Lenses makeHledgerClassyLenses ''InputOpts instance HasBalancingOpts InputOpts where balancingOpts = balancingopts hledger-lib-1.30/Hledger/Read/JournalReader.hs0000644000000000000000000012615214434445206017334 0ustar0000000000000000--- * -*- outline-regexp:"--- *"; -*- --- ** doc -- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections. {-| A reader for hledger's journal file format (). hledger's journal format is a compatible subset of c++ ledger's (), so this reader should handle many ledger files as well. Example: @ 2012\/3\/24 gift expenses:gifts $10 assets:cash @ Journal format supports the include directive which can read files in other formats, so the other file format readers need to be importable and invocable here. Some important parts of journal parsing are therefore kept in Hledger.Read.Common, to avoid import cycles. -} --- ** language {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE NoMonoLocalBinds #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PackageImports #-} {-# LANGUAGE ScopedTypeVariables #-} --- ** exports module Hledger.Read.JournalReader ( -- * Reader-finding utils findReader, splitReaderPrefix, -- * Reader reader, -- * Parsing utils parseAndFinaliseJournal, runJournalParser, rjp, runErroringJournalParser, rejp, -- * Parsers used elsewhere getParentAccount, journalp, directivep, defaultyeardirectivep, marketpricedirectivep, datetimep, datep, modifiedaccountnamep, tmpostingrulep, statusp, emptyorcommentlinep, followingcommentp, accountaliasp -- * Tests ,tests_JournalReader ) where --- ** imports import qualified Control.Monad.Fail as Fail (fail) import qualified Control.Exception as C import Control.Monad (forM_, when, void, unless) import Control.Monad.IO.Class (MonadIO, liftIO) import Control.Monad.Except (ExceptT(..), runExceptT) import Control.Monad.State.Strict (evalStateT,get,modify',put) import Control.Monad.Trans.Class (lift) import Data.Char (toLower) import Data.Either (isRight, lefts) import qualified Data.Map.Strict as M import Data.Text (Text) import Data.String import Data.List import Data.Maybe import qualified Data.Text as T import Data.Time.Calendar import Data.Time.LocalTime import Safe import Text.Megaparsec hiding (parse) import Text.Megaparsec.Char import Text.Megaparsec.Custom import Text.Printf import System.FilePath import "Glob" System.FilePath.Glob hiding (match) import Hledger.Data import Hledger.Read.Common import Hledger.Utils import qualified Hledger.Read.CsvReader as CsvReader (reader) import qualified Hledger.Read.RulesReader as RulesReader (reader) import qualified Hledger.Read.TimeclockReader as TimeclockReader (reader) import qualified Hledger.Read.TimedotReader as TimedotReader (reader) --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings -- --- ** parsing utilities -- | Run a journal parser in some monad. See also: parseWithState. runJournalParser, rjp :: Monad m => JournalParser m a -> Text -> m (Either HledgerParseErrors a) runJournalParser p = runParserT (evalStateT p nulljournal) "" rjp = runJournalParser -- | Run an erroring journal parser in some monad. See also: parseWithState. 
runErroringJournalParser, rejp :: Monad m => ErroringJournalParser m a -> Text -> m (Either FinalParseError (Either HledgerParseErrors a)) runErroringJournalParser p t = runExceptT $ runParserT (evalStateT p nulljournal) "" t rejp = runErroringJournalParser --- ** reader finding utilities -- Defined here rather than Hledger.Read so that we can use them in includedirectivep below. -- The available journal readers, each one handling a particular data format. readers' :: MonadIO m => [Reader m] readers' = [ reader ,TimeclockReader.reader ,TimedotReader.reader ,RulesReader.reader ,CsvReader.reader -- ,LedgerReader.reader ] readerNames :: [String] readerNames = map rFormat (readers'::[Reader IO]) -- | @findReader mformat mpath@ -- -- Find the reader named by @mformat@, if provided. -- Or, if a file path is provided, find the first reader that handles -- its file extension, if any. findReader :: MonadIO m => Maybe StorageFormat -> Maybe FilePath -> Maybe (Reader m) findReader Nothing Nothing = Nothing findReader (Just fmt) _ = headMay [r | r <- readers', rFormat r == fmt] findReader Nothing (Just path) = case prefix of Just fmt -> headMay [r | r <- readers', rFormat r == fmt] Nothing -> headMay [r | r <- readers', ext `elem` rExtensions r] where (prefix,path') = splitReaderPrefix path ext = map toLower $ drop 1 $ takeExtension path' -- | A file path optionally prefixed by a reader name and colon -- (journal:, csv:, timedot:, etc.). type PrefixedFilePath = FilePath -- | If a filepath is prefixed by one of the reader names and a colon, -- split that off. Eg "csv:-" -> (Just "csv", "-"). splitReaderPrefix :: PrefixedFilePath -> (Maybe String, FilePath) splitReaderPrefix f = headDef (Nothing, f) $ [(Just r, drop (length r + 1) f) | r <- readerNames, (r++":") `isPrefixOf` f] --- ** reader reader :: MonadIO m => Reader m reader = Reader {rFormat = "journal" ,rExtensions = ["journal", "j", "hledger", "ledger"] ,rReadFn = parse ,rParser = journalp -- no need to add command line aliases like journalp' -- when called as a subparser I think } -- | Parse and post-process a "Journal" from hledger's journal file -- format, or give an error. parse :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal parse iopts f = parseAndFinaliseJournal journalp' iopts f where journalp' = do -- reverse parsed aliases to ensure that they are applied in order given on commandline mapM_ addAccountAlias (reverse $ aliasesFromOpts iopts) journalp --- ** parsers --- *** journal -- | A journal parser. Accumulates and returns a "ParsedJournal", -- which should be finalised/validated before use. -- -- >>> rejp (journalp <* eof) "2015/1/1\n a 0\n" -- Right (Right Journal (unknown) with 1 transactions, 1 accounts) -- journalp :: MonadIO m => ErroringJournalParser m ParsedJournal journalp = do many addJournalItemP eof get -- | A side-effecting parser; parses any kind of journal item -- and updates the parse state accordingly. addJournalItemP :: MonadIO m => ErroringJournalParser m () addJournalItemP = -- all journal line types can be distinguished by the first -- character, can use choice without backtracking choice [ directivep , transactionp >>= modify' . addTransaction , transactionmodifierp >>= modify' . addTransactionModifier , periodictransactionp >>= modify' . addPeriodicTransaction , marketpricedirectivep >>= modify' . addPriceDirective , void (lift emptyorcommentlinep) , void (lift multilinecommentp) ] "transaction or directive" --- *** directives -- | Parse any journal directive and update the parse state accordingly. 
-- Cf http://hledger.org/hledger.html#directives, -- http://ledger-cli.org/3.0/doc/ledger3.html#Command-Directives directivep :: MonadIO m => ErroringJournalParser m () directivep = (do optional $ oneOf ['!','@'] choice [ includedirectivep ,aliasdirectivep ,endaliasesdirectivep ,accountdirectivep ,applyaccountdirectivep ,applyfixeddirectivep ,applytagdirectivep ,assertdirectivep ,bucketdirectivep ,capturedirectivep ,checkdirectivep ,commandlineflagdirectivep ,commoditydirectivep ,commodityconversiondirectivep ,decimalmarkdirectivep ,defaultyeardirectivep ,defaultcommoditydirectivep ,definedirectivep ,endapplyaccountdirectivep ,endapplyfixeddirectivep ,endapplytagdirectivep ,endapplyyeardirectivep ,endtagdirectivep ,evaldirectivep ,exprdirectivep ,ignoredpricecommoditydirectivep ,payeedirectivep ,pythondirectivep ,tagdirectivep ,valuedirectivep ] ) "directive" -- | Parse an include directive. include's argument is an optionally -- file-format-prefixed file path or glob pattern. In the latter case, -- the prefix is applied to each matched path. Examples: -- foo.j, foo/bar.j, timedot:foo/2020*.md includedirectivep :: MonadIO m => ErroringJournalParser m () includedirectivep = do string "include" lift skipNonNewlineSpaces1 prefixedglob <- T.unpack <$> takeWhileP Nothing (/= '\n') -- don't consume newline yet parentoff <- getOffset parentpos <- getSourcePos let (mprefix,glb) = splitReaderPrefix prefixedglob paths <- getFilePaths parentoff parentpos glb let prefixedpaths = case mprefix of Nothing -> paths Just fmt -> map ((fmt++":")++) paths forM_ prefixedpaths $ parseChild parentpos void newline where getFilePaths :: MonadIO m => Int -> SourcePos -> FilePath -> JournalParser m [FilePath] getFilePaths parseroff parserpos filename = do let curdir = takeDirectory (sourceName parserpos) filename' <- lift $ expandHomePath filename `orRethrowIOError` (show parserpos ++ " locating " ++ filename) -- Compiling filename as a glob pattern works even if it is a literal fileglob <- case tryCompileWith compDefault{errorRecovery=False} filename' of Right x -> pure x Left e -> customFailure $ parseErrorAt parseroff $ "Invalid glob pattern: " ++ e -- Get all matching files in the current working directory, sorting in -- lexicographic order to simulate the output of 'ls'. filepaths <- liftIO $ sort <$> globDir1 fileglob curdir if (not . null) filepaths then pure filepaths else customFailure $ parseErrorAt parseroff $ "No existing files match pattern: " ++ filename parseChild :: MonadIO m => SourcePos -> PrefixedFilePath -> ErroringJournalParser m () parseChild parentpos prefixedpath = do let (_mprefix,filepath) = splitReaderPrefix prefixedpath parentj <- get let parentfilestack = jincludefilestack parentj when (filepath `elem` parentfilestack) $ Fail.fail ("Cyclic include: " ++ filepath) childInput <- traceOrLogAt 6 ("parseChild: "++takeFileName filepath) $ lift $ readFilePortably filepath `orRethrowIOError` (show parentpos ++ " reading " ++ filepath) let initChildj = newJournalWithParseStateFrom filepath parentj -- Choose a reader/parser based on the file path prefix or file extension, -- defaulting to JournalReader. Duplicating readJournal a bit here. let r = fromMaybe reader $ findReader Nothing (Just prefixedpath) parser = rParser r dbg6IO "parseChild: trying reader" (rFormat r) -- Parse the file (of whichever format) to a Journal, with file path and source text attached. 
updatedChildj <- journalAddFile (filepath, childInput) <$> parseIncludeFile parser initChildj filepath childInput -- Merge this child journal into the parent journal -- (with debug logging for troubleshooting account display order). -- The parent journal is the second argument to journalConcat; this means -- its parse state is kept, and its lists are appended to child's (which -- ultimately produces the right list order, because parent's and child's -- lists are in reverse order at this stage. Cf #1909). let parentj' = dbgJournalAcctDeclOrder ("parseChild: child " <> childfilename <> " acct decls: ") updatedChildj `journalConcat` dbgJournalAcctDeclOrder ("parseChild: parent " <> parentfilename <> " acct decls: ") parentj where childfilename = takeFileName filepath parentfilename = maybe "(unknown)" takeFileName $ headMay $ jincludefilestack parentj -- XXX more accurate than journalFilePath for some reason -- Update the parse state. put parentj' newJournalWithParseStateFrom :: FilePath -> Journal -> Journal newJournalWithParseStateFrom filepath j = nulljournal{ jparsedefaultyear = jparsedefaultyear j ,jparsedefaultcommodity = jparsedefaultcommodity j ,jparseparentaccounts = jparseparentaccounts j ,jparsedecimalmark = jparsedecimalmark j ,jparsealiases = jparsealiases j ,jcommodities = jcommodities j -- ,jparsetransactioncount = jparsetransactioncount j ,jparsetimeclockentries = jparsetimeclockentries j ,jincludefilestack = filepath : jincludefilestack j } -- | Lift an IO action into the exception monad, rethrowing any IO -- error with the given message prepended. orRethrowIOError :: MonadIO m => IO a -> String -> TextParser m a orRethrowIOError io msg = do eResult <- liftIO $ (Right <$> io) `C.catch` \(e::C.IOException) -> pure $ Left $ printf "%s:\n%s" msg (show e) case eResult of Right res -> pure res Left errMsg -> Fail.fail errMsg -- Parse an account directive, adding its info to the journal's -- list of account declarations. accountdirectivep :: JournalParser m () accountdirectivep = do off <- getOffset -- XXX figure out a more precise position later pos <- getSourcePos string "account" lift skipNonNewlineSpaces1 -- the account name, possibly modified by preceding alias or apply account directives acct <- (notFollowedBy (char '(' <|> char '[') "account name without brackets") >> modifiedaccountnamep -- maybe a comment, on this and/or following lines (cmt, tags) <- lift transactioncommentp -- maybe Ledger-style subdirectives (ignored) skipMany indentedlinep -- an account type may have been set by account type code or a tag; -- the latter takes precedence let metype = parseAccountTypeCode <$> lookup accountTypeTagName tags -- update the journal addAccountDeclaration (acct, cmt, tags, pos) unless (null tags) $ addDeclaredAccountTags acct tags case metype of Nothing -> return () Just (Right t) -> addDeclaredAccountType acct t Just (Left err) -> customFailure $ parseErrorAt off err -- The special tag used for declaring account type. XXX change to "class" ? 
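-- For example (an illustrative comment, in the style of the accountdirectivep tests below):
--
--   account assets:bank:checking    ; type:A
--
-- attaches the tag ("type","A"), which parseAccountTypeCode resolves to Asset.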
accountTypeTagName = "type" parseAccountTypeCode :: Text -> Either String AccountType parseAccountTypeCode s = case T.toLower s of "asset" -> Right Asset "a" -> Right Asset "liability" -> Right Liability "l" -> Right Liability "equity" -> Right Equity "e" -> Right Equity "revenue" -> Right Revenue "r" -> Right Revenue "expense" -> Right Expense "x" -> Right Expense "cash" -> Right Cash "c" -> Right Cash "conversion" -> Right Conversion "v" -> Right Conversion _ -> Left err where err = T.unpack $ "invalid account type code "<>s<>", should be one of " <> T.intercalate ", " ["A","L","E","R","X","C","V","Asset","Liability","Equity","Revenue","Expense","Cash","Conversion"] -- Add an account declaration to the journal, auto-numbering it. addAccountDeclaration :: (AccountName,Text,[Tag],SourcePos) -> JournalParser m () addAccountDeclaration (a,cmt,tags,pos) = do modify' (\j -> let decls = jdeclaredaccounts j d = (a, nullaccountdeclarationinfo{ adicomment = cmt ,aditags = tags ,adideclarationorder = length decls + 1 -- gets renumbered when Journals are finalised or merged ,adisourcepos = pos }) in j{jdeclaredaccounts = d:decls}) -- Add a payee declaration to the journal. addPayeeDeclaration :: (Payee,Text,[Tag]) -> JournalParser m () addPayeeDeclaration (p, cmt, tags) = modify' (\j@Journal{jdeclaredpayees} -> j{jdeclaredpayees=d:jdeclaredpayees}) where d = (p ,nullpayeedeclarationinfo{ pdicomment = cmt ,pditags = tags }) -- Add a tag declaration to the journal. addTagDeclaration :: (TagName,Text) -> JournalParser m () addTagDeclaration (t, cmt) = modify' (\j@Journal{jdeclaredtags} -> j{jdeclaredtags=tagandinfo:jdeclaredtags}) where tagandinfo = (t, nulltagdeclarationinfo{tdicomment=cmt}) indentedlinep :: JournalParser m String indentedlinep = lift skipNonNewlineSpaces1 >> (rstrip <$> lift restofline) -- | Parse a one-line or multi-line commodity directive. -- -- >>> Right _ <- rjp commoditydirectivep "commodity $1.00" -- >>> Right _ <- rjp commoditydirectivep "commodity $\n format $1.00" -- >>> Right _ <- rjp commoditydirectivep "commodity $\n\n" -- a commodity with no format -- >>> Right _ <- rjp commoditydirectivep "commodity $1.00\n format $1.00" -- both, what happens ? commoditydirectivep :: JournalParser m () commoditydirectivep = commoditydirectiveonelinep <|> commoditydirectivemultilinep -- | Parse a one-line commodity directive. -- -- >>> Right _ <- rjp commoditydirectiveonelinep "commodity $1.00" -- >>> Right _ <- rjp commoditydirectiveonelinep "commodity $1.00 ; blah\n" commoditydirectiveonelinep :: JournalParser m () commoditydirectiveonelinep = do (off, Amount{acommodity,astyle}) <- try $ do string "commodity" lift skipNonNewlineSpaces1 off <- getOffset amt <- amountp pure $ (off, amt) lift skipNonNewlineSpaces _ <- lift followingcommentp let comm = Commodity{csymbol=acommodity, cformat=Just $ dbg6 "style from commodity directive" astyle} if isNothing $ asdecimalpoint astyle then customFailure $ parseErrorAt off pleaseincludedecimalpoint else modify' (\j -> j{jcommodities=M.insert acommodity comm $ jcommodities j}) pleaseincludedecimalpoint :: String pleaseincludedecimalpoint = chomp $ unlines [ "Please include a decimal point or decimal comma in commodity directives," ,"to help us parse correctly. It may be followed by zero or more decimal digits." ,"Examples:" ,"commodity $1000. 
; no thousands mark, decimal period, no decimals" ,"commodity 1.234,00 ARS ; period at thousands, decimal comma, 2 decimals" ,"commodity EUR 1 000,000 ; space at thousands, decimal comma, 3 decimals" ,"commodity INR1,23,45,678.0 ; comma at thousands/lakhs/crores, decimal period, 1 decimal" ] -- | Parse a multi-line commodity directive, containing 0 or more format subdirectives. -- -- >>> Right _ <- rjp commoditydirectivemultilinep "commodity $ ; blah \n format $1.00 ; blah" commoditydirectivemultilinep :: JournalParser m () commoditydirectivemultilinep = do string "commodity" lift skipNonNewlineSpaces1 sym <- lift commoditysymbolp _ <- lift followingcommentp -- read all subdirectives, saving format subdirectives as Lefts subdirectives <- many $ indented (eitherP (formatdirectivep sym) (lift restofline)) let mfmt = lastMay $ lefts subdirectives let comm = Commodity{csymbol=sym, cformat=mfmt} modify' (\j -> j{jcommodities=M.insert sym comm $ jcommodities j}) where indented = (lift skipNonNewlineSpaces1 >>) -- | Parse a format (sub)directive, throwing a parse error if its -- symbol does not match the one given. formatdirectivep :: CommoditySymbol -> JournalParser m AmountStyle formatdirectivep expectedsym = do string "format" lift skipNonNewlineSpaces1 off <- getOffset Amount{acommodity,astyle} <- amountp _ <- lift followingcommentp if acommodity==expectedsym then if isNothing $ asdecimalpoint astyle then customFailure $ parseErrorAt off pleaseincludedecimalpoint else return $ dbg6 "style from format subdirective" astyle else customFailure $ parseErrorAt off $ printf "commodity directive symbol \"%s\" and format directive symbol \"%s\" should be the same" expectedsym acommodity -- More Ledger directives, ignore for now: -- apply fixed, apply tag, assert, bucket, A, capture, check, define, expr applyfixeddirectivep, endapplyfixeddirectivep, applytagdirectivep, endapplytagdirectivep, assertdirectivep, bucketdirectivep, capturedirectivep, checkdirectivep, endapplyyeardirectivep, definedirectivep, exprdirectivep, valuedirectivep, evaldirectivep, pythondirectivep, commandlineflagdirectivep :: JournalParser m () applyfixeddirectivep = do string "apply fixed" >> lift restofline >> return () endapplyfixeddirectivep = do string "end apply fixed" >> lift restofline >> return () applytagdirectivep = do string "apply tag" >> lift restofline >> return () endapplytagdirectivep = do string "end apply tag" >> lift restofline >> return () endapplyyeardirectivep = do string "end apply year" >> lift restofline >> return () assertdirectivep = do string "assert" >> lift restofline >> return () bucketdirectivep = do string "A " <|> string "bucket " >> lift restofline >> return () capturedirectivep = do string "capture" >> lift restofline >> return () checkdirectivep = do string "check" >> lift restofline >> return () definedirectivep = do string "define" >> lift restofline >> return () exprdirectivep = do string "expr" >> lift restofline >> return () valuedirectivep = do string "value" >> lift restofline >> return () evaldirectivep = do string "eval" >> lift restofline >> return () commandlineflagdirectivep = do string "--" >> lift restofline >> return () pythondirectivep = do string "python" >> lift restofline many $ indentedline <|> blankline return () where indentedline = lift skipNonNewlineSpaces1 >> lift restofline blankline = lift skipNonNewlineSpaces >> newline >> return "" "blank line" keywordp :: String -> JournalParser m () keywordp = void . string . 
fromString spacesp :: JournalParser m () spacesp = void $ lift skipNonNewlineSpaces1 -- | Backtracking parser similar to string, but allows varying amount of space between words keywordsp :: String -> JournalParser m () keywordsp = try . sequence_ . intersperse spacesp . map keywordp . words applyaccountdirectivep :: JournalParser m () applyaccountdirectivep = do keywordsp "apply account" "apply account directive" lift skipNonNewlineSpaces1 parent <- lift accountnamep newline pushParentAccount parent endapplyaccountdirectivep :: JournalParser m () endapplyaccountdirectivep = do keywordsp "end apply account" "end apply account directive" popParentAccount aliasdirectivep :: JournalParser m () aliasdirectivep = do string "alias" lift skipNonNewlineSpaces1 alias <- lift accountaliasp addAccountAlias alias endaliasesdirectivep :: JournalParser m () endaliasesdirectivep = do keywordsp "end aliases" "end aliases directive" clearAccountAliases tagdirectivep :: JournalParser m () tagdirectivep = do string "tag" "tag directive" lift skipNonNewlineSpaces1 tagname <- lift $ T.pack <$> some nonspace (comment, _) <- lift transactioncommentp skipMany indentedlinep addTagDeclaration (tagname,comment) return () -- end tag or end apply tag endtagdirectivep :: JournalParser m () endtagdirectivep = (do string "end" lift skipNonNewlineSpaces1 optional $ string "apply" >> lift skipNonNewlineSpaces1 string "tag" lift skipNonNewlineSpaces eol return () ) "end tag or end apply tag directive" payeedirectivep :: JournalParser m () payeedirectivep = do string "payee" "payee directive" lift skipNonNewlineSpaces1 payee <- lift $ T.strip <$> noncommenttext1p (comment, tags) <- lift transactioncommentp skipMany indentedlinep addPayeeDeclaration (payee, comment, tags) return () defaultyeardirectivep :: JournalParser m () defaultyeardirectivep = do (string "Y" <|> string "year" <|> string "apply year") "default year" lift skipNonNewlineSpaces setYear =<< lift yearp defaultcommoditydirectivep :: JournalParser m () defaultcommoditydirectivep = do char 'D' "default commodity" lift skipNonNewlineSpaces1 off <- getOffset Amount{acommodity,astyle} <- amountp lift restofline if isNothing $ asdecimalpoint astyle then customFailure $ parseErrorAt off pleaseincludedecimalpoint else setDefaultCommodityAndStyle (acommodity, astyle) marketpricedirectivep :: JournalParser m PriceDirective marketpricedirectivep = do char 'P' "market price" lift skipNonNewlineSpaces date <- try (do {LocalTime d _ <- datetimep; return d}) <|> datep -- a time is ignored lift skipNonNewlineSpaces1 symbol <- lift commoditysymbolp lift skipNonNewlineSpaces price <- amountp lift restofline return $ PriceDirective date symbol price ignoredpricecommoditydirectivep :: JournalParser m () ignoredpricecommoditydirectivep = do char 'N' "ignored-price commodity" lift skipNonNewlineSpaces1 lift commoditysymbolp lift restofline return () commodityconversiondirectivep :: JournalParser m () commodityconversiondirectivep = do char 'C' "commodity conversion" lift skipNonNewlineSpaces1 amountp lift skipNonNewlineSpaces char '=' lift skipNonNewlineSpaces amountp lift restofline return () -- | Read a valid decimal mark from the decimal-mark directive e.g -- -- decimal-mark , decimalmarkdirectivep :: JournalParser m () decimalmarkdirectivep = do string "decimal-mark" "decimal mark" lift skipNonNewlineSpaces1 mark <- satisfy isDecimalMark modify' $ \j -> j{jparsedecimalmark=Just mark} lift restofline return () --- *** transactions -- | Parse a transaction modifier (auto postings) 
rule. transactionmodifierp :: JournalParser m TransactionModifier transactionmodifierp = do char '=' "modifier transaction" lift skipNonNewlineSpaces querytxt <- lift $ T.strip <$> descriptionp (_comment, _tags) <- lift transactioncommentp -- TODO apply these to modified txns ? postingrules <- tmpostingrulesp Nothing return $ TransactionModifier querytxt postingrules -- | Parse a periodic transaction rule. -- -- This reuses periodexprp which parses period expressions on the command line. -- This is awkward because periodexprp supports relative and partial dates, -- which we don't really need here, and it doesn't support the notion of a -- default year set by a Y directive, which we do need to consider here. -- We resolve it as follows: in periodic transactions' period expressions, -- if there is a default year Y in effect, partial/relative dates are calculated -- relative to Y/1/1. If not, they are calculated related to today as usual. periodictransactionp :: MonadIO m => JournalParser m PeriodicTransaction periodictransactionp = do startpos <- getSourcePos -- first line char '~' "periodic transaction" lift $ skipNonNewlineSpaces -- if there's a default year in effect, use Y/1/1 as base for partial/relative dates today <- liftIO getCurrentDay mdefaultyear <- getYear let refdate = case mdefaultyear of Nothing -> today Just y -> fromGregorian y 1 1 periodExcerpt <- lift $ excerpt_ $ singlespacedtextsatisfying1p (\c -> c /= ';' && c /= '\n') let periodtxt = T.strip $ getExcerptText periodExcerpt -- first parsing with 'singlespacedtextp', then "re-parsing" with -- 'periodexprp' saves 'periodexprp' from having to respect the single- -- and double-space parsing rules (interval, spn) <- lift $ reparseExcerpt periodExcerpt $ do pexp <- periodexprp refdate (<|>) eof $ do offset1 <- getOffset void takeRest offset2 <- getOffset customFailure $ parseErrorAtRegion offset1 offset2 $ "remainder of period expression cannot be parsed" <> "\nperhaps you need to terminate the period expression with a double space?" <> "\na double space is required between period expression and description/comment" pure pexp status <- lift statusp "cleared status" code <- lift codep "transaction code" description <- lift $ T.strip <$> descriptionp (comment, tags) <- lift transactioncommentp -- next lines; use same year determined above postings <- postingsp (Just $ first3 $ toGregorian refdate) endpos <- getSourcePos let sourcepos = (startpos, endpos) return $ nullperiodictransaction{ ptperiodexpr=periodtxt ,ptinterval=interval ,ptspan=spn ,ptsourcepos=sourcepos ,ptstatus=status ,ptcode=code ,ptdescription=description ,ptcomment=comment ,pttags=tags ,ptpostings=postings } -- | Parse a (possibly unbalanced) transaction. 
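-- A hedged example (comment only, in the style of the transactionp tests below): input like
--
--   2007/01/28 coopportunity
--       expenses:food:groceries   $47.18
--       assets:checking          $-47.18
--
-- parses to a Transaction whose postings are tied back to it with txnTieKnot.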
transactionp :: JournalParser m Transaction transactionp = do -- dbgparse 0 "transactionp" startpos <- getSourcePos date <- datep "transaction" edate <- optional (lift $ secondarydatep date) "secondary date" lookAhead (lift spacenonewline <|> newline) "whitespace or newline" status <- lift statusp "cleared status" code <- lift codep "transaction code" description <- lift $ T.strip <$> descriptionp (comment, tags) <- lift transactioncommentp let year = first3 $ toGregorian date postings <- postingsp (Just year) endpos <- getSourcePos let sourcepos = (startpos, endpos) return $ txnTieKnot $ Transaction 0 "" sourcepos date edate status code description comment tags postings --- *** postings -- Parse the following whitespace-beginning lines as postings, posting -- tags, and/or comments (inferring year, if needed, from the given date). postingsp :: Maybe Year -> JournalParser m [Posting] postingsp mTransactionYear = many (postingp mTransactionYear) "postings" -- linebeginningwithspaces :: JournalParser m String -- linebeginningwithspaces = do -- sp <- lift skipNonNewlineSpaces1 -- c <- nonspace -- cs <- lift restofline -- return $ sp ++ (c:cs) ++ "\n" postingp :: Maybe Year -> JournalParser m Posting postingp = fmap fst . postingphelper False -- Parse the following whitespace-beginning lines as transaction posting rules, posting -- tags, and/or comments (inferring year, if needed, from the given date). tmpostingrulesp :: Maybe Year -> JournalParser m [TMPostingRule] tmpostingrulesp mTransactionYear = many (tmpostingrulep mTransactionYear) "posting rules" tmpostingrulep :: Maybe Year -> JournalParser m TMPostingRule tmpostingrulep = fmap (uncurry TMPostingRule) . postingphelper True -- Parse a Posting, and return a flag with whether a multiplier has been detected. -- The multiplier is used in TMPostingRules. postingphelper :: Bool -> Maybe Year -> JournalParser m (Posting, Bool) postingphelper isPostingRule mTransactionYear = do -- lift $ dbgparse 0 "postingp" (status, account) <- try $ do lift skipNonNewlineSpaces1 status <- lift statusp lift skipNonNewlineSpaces account <- modifiedaccountnamep return (status, account) let (ptype, account') = (accountNamePostingType account, textUnbracket account) lift skipNonNewlineSpaces mult <- if isPostingRule then multiplierp else pure False amt <- optional $ amountp' mult lift skipNonNewlineSpaces massertion <- optional balanceassertionp lift skipNonNewlineSpaces (comment,tags,mdate,mdate2) <- lift $ postingcommentp mTransactionYear let p = posting { pdate=mdate , pdate2=mdate2 , pstatus=status , paccount=account' , pamount=maybe missingmixedamt mixedAmount amt , pcomment=comment , ptype=ptype , ptags=tags , pbalanceassertion=massertion } return (p, mult) where multiplierp = option False $ True <$ char '*' --- ** tests tests_JournalReader = testGroup "JournalReader" [ let p = lift accountnamep :: JournalParser IO AccountName in testGroup "accountnamep" [ testCase "basic" $ assertParse p "a:b:c" -- ,testCase "empty inner component" $ assertParseError p "a::c" "" -- TODO -- ,testCase "empty leading component" $ assertParseError p ":b:c" "x" -- ,testCase "empty trailing component" $ assertParseError p "a:b:" "x" ] -- "Parse a date in YYYY/MM/DD format. -- Hyphen (-) and period (.) are also allowed as separators. -- The year may be omitted if a default year has been set. -- Leading zeroes may be omitted." 
,testGroup "datep" [ testCase "YYYY/MM/DD" $ assertParseEq datep "2018/01/01" (fromGregorian 2018 1 1) ,testCase "YYYY-MM-DD" $ assertParse datep "2018-01-01" ,testCase "YYYY.MM.DD" $ assertParse datep "2018.01.01" ,testCase "yearless date with no default year" $ assertParseError datep "1/1" "current year is unknown" ,testCase "yearless date with default year" $ do let s = "1/1" ep <- parseWithState nulljournal{jparsedefaultyear=Just 2018} datep s either (assertFailure . ("parse error at "++) . customErrorBundlePretty) (const $ return ()) ep ,testCase "no leading zero" $ assertParse datep "2018/1/1" ] ,testCase "datetimep" $ do let good = assertParse datetimep bad t = assertParseError datetimep t "" good "2011/1/1 00:00" good "2011/1/1 23:59:59" bad "2011/1/1" bad "2011/1/1 24:00:00" bad "2011/1/1 00:60:00" bad "2011/1/1 00:00:60" bad "2011/1/1 3:5:7" -- timezone is parsed but ignored let t = LocalTime (fromGregorian 2018 1 1) (TimeOfDay 0 0 0) assertParseEq datetimep "2018/1/1 00:00-0800" t assertParseEq datetimep "2018/1/1 00:00+1234" t ,testGroup "periodictransactionp" [ testCase "more period text in comment after one space" $ assertParseEq periodictransactionp "~ monthly from 2018/6 ;In 2019 we will change this\n" nullperiodictransaction { ptperiodexpr = "monthly from 2018/6" ,ptinterval = Months 1 ,ptspan = DateSpan (Just $ Flex $ fromGregorian 2018 6 1) Nothing ,ptsourcepos = (SourcePos "" (mkPos 1) (mkPos 1), SourcePos "" (mkPos 2) (mkPos 1)) ,ptdescription = "" ,ptcomment = "In 2019 we will change this\n" } ,testCase "more period text in description after two spaces" $ assertParseEq periodictransactionp "~ monthly from 2018/6 In 2019 we will change this\n" nullperiodictransaction { ptperiodexpr = "monthly from 2018/6" ,ptinterval = Months 1 ,ptspan = DateSpan (Just $ Flex $ fromGregorian 2018 6 1) Nothing ,ptsourcepos = (SourcePos "" (mkPos 1) (mkPos 1), SourcePos "" (mkPos 2) (mkPos 1)) ,ptdescription = "In 2019 we will change this" ,ptcomment = "" } ,testCase "Next year in description" $ assertParseEq periodictransactionp "~ monthly Next year blah blah\n" nullperiodictransaction { ptperiodexpr = "monthly" ,ptinterval = Months 1 ,ptspan = DateSpan Nothing Nothing ,ptsourcepos = (SourcePos "" (mkPos 1) (mkPos 1), SourcePos "" (mkPos 2) (mkPos 1)) ,ptdescription = "Next year blah blah" ,ptcomment = "" } ,testCase "Just date, no description" $ assertParseEq periodictransactionp "~ 2019-01-04\n" nullperiodictransaction { ptperiodexpr = "2019-01-04" ,ptinterval = NoInterval ,ptspan = DateSpan (Just $ Exact $ fromGregorian 2019 1 4) (Just $ Exact $ fromGregorian 2019 1 5) ,ptsourcepos = (SourcePos "" (mkPos 1) (mkPos 1), SourcePos "" (mkPos 2) (mkPos 1)) ,ptdescription = "" ,ptcomment = "" } ,testCase "Just date, no description + empty transaction comment" $ assertParse periodictransactionp "~ 2019-01-04\n ;\n a 1\n b\n" ] ,testGroup "postingp" [ testCase "basic" $ assertParseEq (postingp Nothing) " expenses:food:dining $10.00 ; a: a a \n ; b: b b \n" posting{ paccount="expenses:food:dining", pamount=mixedAmount (usd 10), pcomment="a: a a\nb: b b\n", ptags=[("a","a a"), ("b","b b")] } ,testCase "posting dates" $ assertParseEq (postingp Nothing) " a 1. 
; date:2012/11/28, date2=2012/11/29,b:b\n" nullposting{ paccount="a" ,pamount=mixedAmount (num 1) ,pcomment="date:2012/11/28, date2=2012/11/29,b:b\n" ,ptags=[("date", "2012/11/28"), ("date2=2012/11/29,b", "b")] -- TODO tag name parsed too greedily ,pdate=Just $ fromGregorian 2012 11 28 ,pdate2=Nothing -- Just $ fromGregorian 2012 11 29 } ,testCase "posting dates bracket syntax" $ assertParseEq (postingp Nothing) " a 1. ; [2012/11/28=2012/11/29]\n" nullposting{ paccount="a" ,pamount=mixedAmount (num 1) ,pcomment="[2012/11/28=2012/11/29]\n" ,ptags=[] ,pdate= Just $ fromGregorian 2012 11 28 ,pdate2=Just $ fromGregorian 2012 11 29 } ,testCase "quoted commodity symbol with digits" $ assertParse (postingp Nothing) " a 1 \"DE123\"\n" ,testCase "only lot price" $ assertParse (postingp Nothing) " a 1A {1B}\n" ,testCase "fixed lot price" $ assertParse (postingp Nothing) " a 1A {=1B}\n" ,testCase "total lot price" $ assertParse (postingp Nothing) " a 1A {{1B}}\n" ,testCase "fixed total lot price, and spaces" $ assertParse (postingp Nothing) " a 1A {{ = 1B }}\n" ,testCase "lot price before transaction price" $ assertParse (postingp Nothing) " a 1A {1B} @ 1B\n" ,testCase "lot price after transaction price" $ assertParse (postingp Nothing) " a 1A @ 1B {1B}\n" ,testCase "lot price after balance assertion not allowed" $ assertParseError (postingp Nothing) " a 1A @ 1B = 1A {1B}\n" "unexpected '{'" ,testCase "only lot date" $ assertParse (postingp Nothing) " a 1A [2000-01-01]\n" ,testCase "transaction price, lot price, lot date" $ assertParse (postingp Nothing) " a 1A @ 1B {1B} [2000-01-01]\n" ,testCase "lot date, lot price, transaction price" $ assertParse (postingp Nothing) " a 1A [2000-01-01] {1B} @ 1B\n" ,testCase "balance assertion over entire contents of account" $ assertParse (postingp Nothing) " a $1 == $1\n" ] ,testGroup "transactionmodifierp" [ testCase "basic" $ assertParseEq transactionmodifierp "= (some value expr)\n some:postings 1.\n" nulltransactionmodifier { tmquerytxt = "(some value expr)" ,tmpostingrules = [TMPostingRule nullposting{paccount="some:postings", pamount=mixedAmount (num 1)} False] } ] ,testGroup "transactionp" [ testCase "just a date" $ assertParseEq transactionp "2015/1/1\n" nulltransaction{tdate=fromGregorian 2015 1 1} ,testCase "more complex" $ assertParseEq transactionp (T.unlines [ "2012/05/14=2012/05/15 (code) desc ; tcomment1", " ; tcomment2", " ; ttag1: val1", " * a $1.00 ; pcomment1", " ; pcomment2", " ; ptag1: val1", " ; ptag2: val2" ]) nulltransaction{ tsourcepos=(SourcePos "" (mkPos 1) (mkPos 1), SourcePos "" (mkPos 8) (mkPos 1)), -- 8 because there are 7 lines tprecedingcomment="", tdate=fromGregorian 2012 5 14, tdate2=Just $ fromGregorian 2012 5 15, tstatus=Unmarked, tcode="code", tdescription="desc", tcomment="tcomment1\ntcomment2\nttag1: val1\n", ttags=[("ttag1","val1")], tpostings=[ nullposting{ pdate=Nothing, pstatus=Cleared, paccount="a", pamount=mixedAmount (usd 1), pcomment="pcomment1\npcomment2\nptag1: val1\nptag2: val2\n", ptype=RegularPosting, ptags=[("ptag1","val1"),("ptag2","val2")], ptransaction=Nothing } ] } ,testCase "parses a well-formed transaction" $ assertBool "" $ isRight $ rjp transactionp $ T.unlines ["2007/01/28 coopportunity" ," expenses:food:groceries $47.18" ," assets:checking $-47.18" ,"" ] ,testCase "does not parse a following comment as part of the description" $ assertParseEqOn transactionp "2009/1/1 a ;comment\n b 1\n" tdescription "a" ,testCase "parses a following whitespace line" $ assertBool "" $ isRight $ rjp transactionp $ 
T.unlines ["2012/1/1" ," a 1" ," b" ," " ] ,testCase "parses an empty transaction comment following whitespace line" $ assertBool "" $ isRight $ rjp transactionp $ T.unlines ["2012/1/1" ," ;" ," a 1" ," b" ," " ] ,testCase "comments everywhere, two postings parsed" $ assertParseEqOn transactionp (T.unlines ["2009/1/1 x ; transaction comment" ," a 1 ; posting 1 comment" ," ; posting 1 comment 2" ," b" ," ; posting 2 comment" ]) (length . tpostings) 2 ] -- directives ,testGroup "directivep" [ testCase "supports !" $ do assertParseE directivep "!account a\n" assertParseE directivep "!D 1.0\n" ] ,testGroup "accountdirectivep" [ testCase "with-comment" $ assertParse accountdirectivep "account a:b ; a comment\n" ,testCase "does-not-support-!" $ assertParseError accountdirectivep "!account a:b\n" "" ,testCase "account-type-code" $ assertParse accountdirectivep "account a:b ; type:A\n" ,testCase "account-type-tag" $ assertParseStateOn accountdirectivep "account a:b ; type:asset\n" jdeclaredaccounts [("a:b", AccountDeclarationInfo{adicomment = "type:asset\n" ,aditags = [("type","asset")] ,adideclarationorder = 1 ,adisourcepos = fst nullsourcepos }) ] ] ,testCase "commodityconversiondirectivep" $ do assertParse commodityconversiondirectivep "C 1h = $50.00\n" ,testCase "defaultcommoditydirectivep" $ do assertParse defaultcommoditydirectivep "D $1,000.0\n" assertParseError defaultcommoditydirectivep "D $1000\n" "Please include a decimal point or decimal comma" ,testGroup "defaultyeardirectivep" [ testCase "1000" $ assertParse defaultyeardirectivep "Y 1000" -- XXX no \n like the others -- ,testCase "999" $ assertParseError defaultyeardirectivep "Y 999" "bad year number" ,testCase "12345" $ assertParse defaultyeardirectivep "Y 12345" ] ,testCase "ignoredpricecommoditydirectivep" $ do assertParse ignoredpricecommoditydirectivep "N $\n" ,testGroup "includedirectivep" [ testCase "include" $ assertParseErrorE includedirectivep "include nosuchfile\n" "No existing files match pattern: nosuchfile" ,testCase "glob" $ assertParseErrorE includedirectivep "include nosuchfile*\n" "No existing files match pattern: nosuchfile*" ] ,testCase "marketpricedirectivep" $ assertParseEq marketpricedirectivep "P 2017/01/30 BTC $922.83\n" PriceDirective{ pddate = fromGregorian 2017 1 30, pdcommodity = "BTC", pdamount = usd 922.83 } ,testGroup "payeedirectivep" [ testCase "simple" $ assertParse payeedirectivep "payee foo\n" ,testCase "with-comment" $ assertParse payeedirectivep "payee foo ; comment\n" ] ,testCase "tagdirectivep" $ do assertParse tagdirectivep "tag foo \n" ,testCase "endtagdirectivep" $ do assertParse endtagdirectivep "end tag \n" assertParse endtagdirectivep "end apply tag \n" ,testGroup "journalp" [ testCase "empty file" $ assertParseEqE journalp "" nulljournal ] -- these are defined here rather than in Common so they can use journalp ,testCase "parseAndFinaliseJournal" $ do ej <- runExceptT $ parseAndFinaliseJournal journalp definputopts "" "2019-1-1\n" let Right j = ej assertEqual "" [""] $ journalFilePaths j ] hledger-lib-1.30/Hledger/Read/RulesReader.hs0000644000000000000000000026570214434445206017021 0ustar0000000000000000--- * module --- ** doc -- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections. {-| A reader for a CSV rules file. This reads the actual data from a file specified by a `source` rule or from a similarly-named file in the same directory. Most of the code for reading rules files and csv files is in this module. 
-} -- Lots of haddocks in this file are for non-exported types. -- Here's a command that will render them: -- stack haddock hledger-lib --fast --no-haddock-deps --haddock-arguments='--ignore-all-exports' --open --- ** language {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-unrecognised-pragmas #-} --- ** exports module Hledger.Read.RulesReader ( -- * Reader reader, -- * Misc. readJournalFromCsv, -- readRulesFile, -- parseCsvRules, -- validateCsvRules, -- CsvRules, dataFileFor, rulesFileFor, -- * Tests tests_RulesReader, ) where --- ** imports import Prelude hiding (Applicative(..)) import Control.Applicative (Applicative(..)) import Control.Monad (unless, when, void) import Control.Monad.Except (ExceptT(..), liftEither, throwError) import qualified Control.Monad.Fail as Fail import Control.Monad.IO.Class (MonadIO, liftIO) import Control.Monad.State.Strict (StateT, get, modify', evalStateT) import Control.Monad.Trans.Class (lift) import Data.Char (toLower, isDigit, isSpace, isAlphaNum, ord) import Data.Bifunctor (first) import Data.Functor ((<&>)) import Data.List (elemIndex, foldl', mapAccumL, nub, sortOn) import Data.List.Extra (groupOn) import Data.Maybe (catMaybes, fromMaybe, isJust) import Data.MemoUgly (memo) import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import qualified Data.Text.Encoding as T import qualified Data.Text.IO as T import Data.Time ( Day, TimeZone, UTCTime, LocalTime, ZonedTime(ZonedTime), defaultTimeLocale, getCurrentTimeZone, localDay, parseTimeM, utcToLocalTime, localTimeToUTC, zonedTimeToUTC) import Safe (atMay, headMay, lastMay, readMay) import System.FilePath ((), takeDirectory, takeExtension, stripExtension, takeFileName) import qualified Data.Csv as Cassava import qualified Data.Csv.Parser.Megaparsec as CassavaMegaparsec import qualified Data.ByteString as B import qualified Data.ByteString.Lazy as BL import Data.Foldable (asum, toList) import Text.Megaparsec hiding (match, parse) import Text.Megaparsec.Char (char, newline, string) import Text.Megaparsec.Custom (parseErrorAt) import Text.Printf (printf) import Hledger.Data import Hledger.Utils import Hledger.Read.Common (aliasesFromOpts, Reader(..), InputOpts(..), amountp, statusp, journalFinalise, accountnamep ) import Hledger.Read.CsvUtils import System.Directory (doesFileExist, getHomeDirectory) --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings --- ** reader _READER__________________________________________ = undefined -- VSCode outline separator reader :: MonadIO m => Reader m reader = Reader {rFormat = "rules" ,rExtensions = ["rules"] ,rReadFn = parse ,rParser = error' "sorry, rules files can't be included" -- PARTIAL: } isFileName f = takeFileName f == f getDownloadDir = do home <- getHomeDirectory return $ home "Downloads" -- XXX -- | Parse and post-process a "Journal" from the given rules file path, or give an error. -- A data file is inferred from the @source@ rule, otherwise from a similarly-named file -- in the same directory. -- The source rule can specify a glob pattern and supports ~ for home directory. -- If it is a bare filename it will be relative to the defaut download directory -- on this system. If is a relative file path it will be relative to the rules -- file's directory. When a glob pattern matches multiple files, the alphabetically -- last is used. 
(Eg in case of multiple numbered downloads, the highest-numbered -- will be used.) -- The provided text, or a --rules-file option, are ignored by this reader. -- Balance assertions are not checked. parse :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal parse iopts f _ = do rules <- readRulesFile $ dbg4 "reading rules file" f -- XXX higher-than usual debug level for file reading to bypass excessive noise from elsewhere, normally 6 or 7 mdatafile <- liftIO $ do dldir <- getDownloadDir let rulesdir = takeDirectory f let msource = T.unpack <$> getDirective "source" rules fs <- case msource of Just src -> expandGlob dir (dbg4 "source" src) >>= sortByModTime <&> dbg4 ("matched files"<>desc<>", newest first") where (dir,desc) = if isFileName src then (dldir," in download directory") else (rulesdir,"") Nothing -> return [maybe err (dbg4 "inferred source") $ dataFileFor f] -- shouldn't fail, f has .rules extension where err = error' $ "could not infer a data file for " <> f return $ dbg4 "data file" $ headMay fs case mdatafile of Nothing -> return nulljournal -- data file specified by source rule was not found Just dat -> do exists <- liftIO $ doesFileExist dat if not (dat=="-" || exists) then return nulljournal -- data file inferred from rules file name was not found else do t <- liftIO $ readFileOrStdinPortably dat readJournalFromCsv (Just $ Left rules) dat t -- apply any command line account aliases. Can fail with a bad replacement pattern. >>= liftEither . journalApplyAliases (aliasesFromOpts iopts) -- journalFinalise assumes the journal's items are -- reversed, as produced by JournalReader's parser. -- But here they are already properly ordered. So we'd -- better preemptively reverse them once more. XXX inefficient . journalReverse >>= journalFinalise iopts{balancingopts_=(balancingopts_ iopts){ignore_assertions_=True}} f "" --- ** reading rules files --- *** rules utilities _RULES_READING__________________________________________ = undefined -- | Given a rules file path, what would be the corresponding data file ? -- (Remove a .rules extension.) dataFileFor :: FilePath -> Maybe FilePath dataFileFor = stripExtension "rules" -- | Given a csv file path, what would be the corresponding rules file ? -- (Add a .rules extension.) rulesFileFor :: FilePath -> FilePath rulesFileFor = (++ ".rules") -- | An exception-throwing IO action that reads and validates -- the specified CSV rules file (which may include other rules files). readRulesFile :: FilePath -> ExceptT String IO CsvRules readRulesFile f = liftIO (do dbg6IO "using conversion rules file" f readFilePortably f >>= expandIncludes (takeDirectory f) ) >>= either throwError return . parseAndValidateCsvRules f -- | Inline all files referenced by include directives in this hledger CSV rules text, recursively. -- Included file paths may be relative to the directory of the provided file path. -- This is done as a pre-parse step to simplify the CSV rules parser. 
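-- For example (an illustrative sketch): a rules file line such as
--
--   include common.rules
--
-- is replaced by the (recursively expanded) contents of common.rules, a
-- relative path like this being resolved against the including file's directory.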
expandIncludes :: FilePath -> Text -> IO Text expandIncludes dir0 content = mapM (expandLine dir0) (T.lines content) <&> T.unlines where expandLine dir1 line = case line of (T.stripPrefix "include " -> Just f) -> expandIncludes dir2 =<< T.readFile f' where f' = dir1 T.unpack (T.dropWhile isSpace f) dir2 = takeDirectory f' _ -> return line -- defaultRulesText :: FilePath -> Text -- defaultRulesText _csvfile = T.pack $ unlines -- ["# hledger csv conversion rules" -- for " ++ csvFileFor (takeFileName csvfile) -- ,"# cf http://hledger.org/hledger.html#csv" -- ,"" -- ,"account1 assets:bank:checking" -- ,"" -- ,"fields date, description, amount1" -- ,"" -- ,"#skip 1" -- ,"#newest-first" -- ,"" -- ,"#date-format %-d/%-m/%Y" -- ,"#date-format %-m/%-d/%Y" -- ,"#date-format %Y-%h-%d" -- ,"" -- ,"#currency $" -- ,"" -- ,"if ITUNES" -- ," account2 expenses:entertainment" -- ,"" -- ,"if (TO|FROM) SAVINGS" -- ," account2 assets:bank:savings\n" -- ] -- | An error-throwing IO action that parses this text as CSV conversion rules -- and runs some extra validation checks. The file path is used in error messages. parseAndValidateCsvRules :: FilePath -> T.Text -> Either String CsvRules parseAndValidateCsvRules rulesfile s = case parseCsvRules rulesfile s of Left err -> Left $ customErrorBundlePretty err Right rules -> first makeFancyParseError $ validateCsvRules rules where makeFancyParseError :: String -> String makeFancyParseError errorString = parseErrorPretty (FancyError 0 (S.singleton $ ErrorFail errorString) :: ParseError Text String) instance ShowErrorComponent String where showErrorComponent = id -- | Parse this text as CSV conversion rules. The file path is for error messages. parseCsvRules :: FilePath -> T.Text -> Either (ParseErrorBundle T.Text HledgerParseErrorData) CsvRules -- parseCsvRules rulesfile s = runParser csvrulesfile nullrules{baseAccount=takeBaseName rulesfile} rulesfile s parseCsvRules = runParser (evalStateT rulesp defrules) -- | Return the validated rules, or an error. validateCsvRules :: CsvRules -> Either String CsvRules validateCsvRules rules = do unless (isAssigned "date") $ Left "Please specify (at top level) the date field. Eg: date %1" Right rules where isAssigned f = isJust $ getEffectiveAssignment rules [] f --- *** rules types _RULES_TYPES__________________________________________ = undefined -- | A set of data definitions and account-matching patterns sufficient to -- convert a particular CSV data file into meaningful journal transactions. data CsvRules' a = CsvRules' { rdirectives :: [(DirectiveName,Text)], -- ^ top-level rules, as (keyword, value) pairs rcsvfieldindexes :: [(CsvFieldName, CsvFieldIndex)], -- ^ csv field names and their column number, if declared by a fields list rassignments :: [(HledgerFieldName, FieldTemplate)], -- ^ top-level assignments to hledger fields, as (field name, value template) pairs rconditionalblocks :: [ConditionalBlock], -- ^ conditional blocks, which containing additional assignments/rules to apply to matched csv records rblocksassigning :: a -- (String -> [ConditionalBlock]) -- ^ all conditional blocks which can potentially assign field with a given name (memoized) } -- | Type used by parsers. Directives, assignments and conditional blocks -- are in the reverse order compared to what is in the file and rblocksassigning is non-functional, -- could not be used for processing CSV records yet type CsvRulesParsed = CsvRules' () -- | Type used after parsing is done. 
Directives, assignments and conditional blocks -- are in the same order as they were in the unput file and rblocksassigning is functional. -- Ready to be used for CSV record processing type CsvRules = CsvRules' (Text -> [ConditionalBlock]) -- XXX simplify instance Eq CsvRules where r1 == r2 = (rdirectives r1, rcsvfieldindexes r1, rassignments r1) == (rdirectives r2, rcsvfieldindexes r2, rassignments r2) -- Custom Show instance used for debug output: omit the rblocksassigning field, which isn't showable. instance Show CsvRules where show r = "CsvRules { rdirectives = " ++ show (rdirectives r) ++ ", rcsvfieldindexes = " ++ show (rcsvfieldindexes r) ++ ", rassignments = " ++ show (rassignments r) ++ ", rconditionalblocks = " ++ show (rconditionalblocks r) ++ " }" type CsvRulesParser a = StateT CsvRulesParsed SimpleTextParser a -- | The keyword of a CSV rule - "fields", "skip", "if", etc. type DirectiveName = Text -- | CSV field name. type CsvFieldName = Text -- | 1-based CSV column number. type CsvFieldIndex = Int -- | Percent symbol followed by a CSV field name or column number. Eg: %date, %1. type CsvFieldReference = Text -- | One of the standard hledger fields or pseudo-fields that can be assigned to. -- Eg date, account1, amount, amount1-in, date-format. type HledgerFieldName = Text -- | A text value to be assigned to a hledger field, possibly -- containing csv field references to be interpolated. type FieldTemplate = Text -- | A strptime date parsing pattern, as supported by Data.Time.Format. type DateFormat = Text -- | A prefix for a matcher test, either & or none (implicit or). data MatcherPrefix = And | None deriving (Show, Eq) -- | A single test for matching a CSV record, in one way or another. data Matcher = RecordMatcher MatcherPrefix Regexp -- ^ match if this regexp matches the overall CSV record | FieldMatcher MatcherPrefix CsvFieldReference Regexp -- ^ match if this regexp matches the referenced CSV field's value deriving (Show, Eq) -- | A conditional block: a set of CSV record matchers, and a sequence -- of rules which will be enabled only if one or more of the matchers -- succeeds. -- -- Three types of rule are allowed inside conditional blocks: field -- assignments, skip, end. (A skip or end rule is stored as if it was -- a field assignment, and executed in validateCsv. XXX) data ConditionalBlock = CB { cbMatchers :: [Matcher] ,cbAssignments :: [(HledgerFieldName, FieldTemplate)] } deriving (Show, Eq) defrules :: CsvRulesParsed defrules = CsvRules' { rdirectives=[], rcsvfieldindexes=[], rassignments=[], rconditionalblocks=[], rblocksassigning = () } -- | Create CsvRules from the content parsed out of the rules file mkrules :: CsvRulesParsed -> CsvRules mkrules rules = let conditionalblocks = reverse $ rconditionalblocks rules maybeMemo = if length conditionalblocks >= 15 then memo else id in CsvRules' { rdirectives=reverse $ rdirectives rules, rcsvfieldindexes=rcsvfieldindexes rules, rassignments=reverse $ rassignments rules, rconditionalblocks=conditionalblocks, rblocksassigning = maybeMemo (\f -> filter (any ((==f).fst) . cbAssignments) conditionalblocks) } --- *** rules parsers _RULES_PARSING__________________________________________ = undefined {- Grammar for the CSV conversion rules, more or less: RULES: RULE* RULE: ( SOURCE | FIELD-LIST | FIELD-ASSIGNMENT | CONDITIONAL-BLOCK | SKIP | TIMEZONE | NEWEST-FIRST | INTRA-DAY-REVERSED | DATE-FORMAT | DECIMAL-MARK | COMMENT | BLANK ) NEWLINE SOURCE: source SPACE FILEPATH FIELD-LIST: fields SPACE FIELD-NAME ( SPACE? , SPACE? 
FIELD-NAME )* FIELD-NAME: QUOTED-FIELD-NAME | BARE-FIELD-NAME QUOTED-FIELD-NAME: " (any CHAR except double-quote)+ " BARE-FIELD-NAME: any CHAR except space, tab, #, ; FIELD-ASSIGNMENT: JOURNAL-FIELD ASSIGNMENT-SEPARATOR FIELD-VALUE JOURNAL-FIELD: date | date2 | status | code | description | comment | account1 | account2 | amount | JOURNAL-PSEUDO-FIELD JOURNAL-PSEUDO-FIELD: amount-in | amount-out | currency ASSIGNMENT-SEPARATOR: SPACE | ( : SPACE? ) FIELD-VALUE: VALUE (possibly containing CSV-FIELD-REFERENCEs) CSV-FIELD-REFERENCE: % CSV-FIELD CSV-FIELD: ( FIELD-NAME | FIELD-NUMBER ) (corresponding to a CSV field) FIELD-NUMBER: DIGIT+ CONDITIONAL-BLOCK: if ( FIELD-MATCHER NEWLINE )+ INDENTED-BLOCK FIELD-MATCHER: ( CSV-FIELD-NAME SPACE? )? ( MATCHOP SPACE? )? PATTERNS MATCHOP: ~ PATTERNS: ( NEWLINE REGEXP )* REGEXP INDENTED-BLOCK: ( SPACE ( FIELD-ASSIGNMENT | COMMENT ) NEWLINE )+ REGEXP: ( NONSPACE CHAR* ) SPACE? VALUE: SPACE? ( CHAR* ) SPACE? COMMENT: SPACE? COMMENT-CHAR VALUE COMMENT-CHAR: # | ; | * NONSPACE: any CHAR not a SPACE-CHAR BLANK: SPACE? SPACE: SPACE-CHAR+ SPACE-CHAR: space | tab CHAR: any character except newline DIGIT: 0-9 -} addDirective :: (DirectiveName, Text) -> CsvRulesParsed -> CsvRulesParsed addDirective d r = r{rdirectives=d:rdirectives r} addAssignment :: (HledgerFieldName, FieldTemplate) -> CsvRulesParsed -> CsvRulesParsed addAssignment a r = r{rassignments=a:rassignments r} setIndexesAndAssignmentsFromList :: [CsvFieldName] -> CsvRulesParsed -> CsvRulesParsed setIndexesAndAssignmentsFromList fs = addAssignmentsFromList fs . setCsvFieldIndexesFromList fs where setCsvFieldIndexesFromList :: [CsvFieldName] -> CsvRulesParsed -> CsvRulesParsed setCsvFieldIndexesFromList fs' r = r{rcsvfieldindexes=zip fs' [1..]} addAssignmentsFromList :: [CsvFieldName] -> CsvRulesParsed -> CsvRulesParsed addAssignmentsFromList fs' r = foldl' maybeAddAssignment r journalfieldnames where maybeAddAssignment rules f = (maybe id addAssignmentFromIndex $ elemIndex f fs') rules where addAssignmentFromIndex i = addAssignment (f, T.pack $ '%':show (i+1)) addConditionalBlock :: ConditionalBlock -> CsvRulesParsed -> CsvRulesParsed addConditionalBlock b r = r{rconditionalblocks=b:rconditionalblocks r} addConditionalBlocks :: [ConditionalBlock] -> CsvRulesParsed -> CsvRulesParsed addConditionalBlocks bs r = r{rconditionalblocks=bs++rconditionalblocks r} rulesp :: CsvRulesParser CsvRules rulesp = do _ <- many $ choice [blankorcommentlinep "blank or comment line" ,(directivep >>= modify' . addDirective) "directive" ,(fieldnamelistp >>= modify' . setIndexesAndAssignmentsFromList) "field name list" ,(fieldassignmentp >>= modify' . addAssignment) "field assignment" -- conditionalblockp backtracks because it shares "if" prefix with conditionaltablep. ,try (conditionalblockp >>= modify' . addConditionalBlock) "conditional block" -- 'reverse' is there to ensure that conditions are added in the order they listed in the file ,(conditionaltablep >>= modify' . addConditionalBlocks . 
reverse) "conditional table" ] eof mkrules <$> get blankorcommentlinep :: CsvRulesParser () blankorcommentlinep = lift (dbgparse 8 "trying blankorcommentlinep") >> choiceInState [blanklinep, commentlinep] blanklinep :: CsvRulesParser () blanklinep = lift skipNonNewlineSpaces >> newline >> return () "blank line" commentlinep :: CsvRulesParser () commentlinep = lift skipNonNewlineSpaces >> commentcharp >> lift restofline >> return () "comment line" commentcharp :: CsvRulesParser Char commentcharp = oneOf (";#*" :: [Char]) directivep :: CsvRulesParser (DirectiveName, Text) directivep = (do lift $ dbgparse 8 "trying directive" d <- choiceInState $ map (lift . string) directives v <- (((char ':' >> lift (many spacenonewline)) <|> lift (some spacenonewline)) >> directivevalp) <|> (optional (char ':') >> lift skipNonNewlineSpaces >> lift eolof >> return "") return (d, v) ) "directive" directives :: [Text] directives = ["source" ,"date-format" ,"decimal-mark" ,"separator" -- ,"default-account" -- ,"default-currency" ,"skip" ,"timezone" ,"newest-first" ,"intra-day-reversed" , "balance-type" ] directivevalp :: CsvRulesParser Text directivevalp = T.pack <$> anySingle `manyTill` lift eolof fieldnamelistp :: CsvRulesParser [CsvFieldName] fieldnamelistp = (do lift $ dbgparse 8 "trying fieldnamelist" string "fields" optional $ char ':' lift skipNonNewlineSpaces1 let separator = lift skipNonNewlineSpaces >> char ',' >> lift skipNonNewlineSpaces f <- fromMaybe "" <$> optional fieldnamep fs <- some $ (separator >> fromMaybe "" <$> optional fieldnamep) lift restofline return . map T.toLower $ f:fs ) "field name list" fieldnamep :: CsvRulesParser Text fieldnamep = quotedfieldnamep <|> barefieldnamep quotedfieldnamep :: CsvRulesParser Text quotedfieldnamep = char '"' *> takeWhile1P Nothing (`notElem` ("\"\n:;#~" :: [Char])) <* char '"' barefieldnamep :: CsvRulesParser Text barefieldnamep = takeWhile1P Nothing (`notElem` (" \t\n,;#~" :: [Char])) fieldassignmentp :: CsvRulesParser (HledgerFieldName, FieldTemplate) fieldassignmentp = do lift $ dbgparse 8 "trying fieldassignmentp" f <- journalfieldnamep v <- choiceInState [ assignmentseparatorp >> fieldvalp , lift eolof >> return "" ] return (f,v) "field assignment" journalfieldnamep :: CsvRulesParser Text journalfieldnamep = do lift (dbgparse 8 "trying journalfieldnamep") choiceInState $ map (lift . string) journalfieldnames maxpostings = 99 -- Transaction fields and pseudo fields for CSV conversion. -- Names must precede any other name they contain, for the parser -- (amount-in before amount; date2 before date). TODO: fix journalfieldnames = concat [[ "account" <> i ,"amount" <> i <> "-in" ,"amount" <> i <> "-out" ,"amount" <> i ,"balance" <> i ,"comment" <> i ,"currency" <> i ] | x <- [maxpostings, (maxpostings-1)..1], let i = T.pack $ show x] ++ ["amount-in" ,"amount-out" ,"amount" ,"balance" ,"code" ,"comment" ,"currency" ,"date2" ,"date" ,"description" ,"status" ,"skip" -- skip and end are not really fields, but we list it here to allow conditional rules that skip records ,"end" ] assignmentseparatorp :: CsvRulesParser () assignmentseparatorp = do lift $ dbgparse 8 "trying assignmentseparatorp" _ <- choiceInState [ lift skipNonNewlineSpaces >> char ':' >> lift skipNonNewlineSpaces , lift skipNonNewlineSpaces1 ] return () fieldvalp :: CsvRulesParser Text fieldvalp = do lift $ dbgparse 8 "trying fieldvalp" T.pack <$> anySingle `manyTill` lift eolof -- A conditional block: one or more matchers, one per line, followed by one or more indented rules. 
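-- For illustration, the kind of rules-file text this parser accepts might look like
-- (a hypothetical snippet following the grammar above, not taken from any real rules file):
--
--   if whole foods
--   & %description groceries
--    account2 expenses:groceries
--    comment  weekly shop
--
-- i.e. one or more matcher lines after "if" (a later matcher may be prefixed with & to
-- AND it with the previous one; otherwise matchers are OR'd), followed by one or more
-- indented field assignments.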
conditionalblockp :: CsvRulesParser ConditionalBlock conditionalblockp = do lift $ dbgparse 8 "trying conditionalblockp" -- "if\nMATCHER" or "if \nMATCHER" or "if MATCHER" start <- getOffset string "if" >> ( (newline >> return Nothing) <|> (lift skipNonNewlineSpaces1 >> optional newline)) ms <- some matcherp as <- catMaybes <$> many (lift skipNonNewlineSpaces1 >> choice [ lift eolof >> return Nothing , fmap Just fieldassignmentp ]) when (null as) $ customFailure $ parseErrorAt start $ "start of conditional block found, but no assignment rules afterward\n(assignment rules in a conditional block should be indented)" return $ CB{cbMatchers=ms, cbAssignments=as} "conditional block" -- A conditional table: "if" followed by separator, followed by some field names, -- followed by many lines, each of which has: -- one matchers, followed by field assignments (as many as there were fields) conditionaltablep :: CsvRulesParser [ConditionalBlock] conditionaltablep = do lift $ dbgparse 8 "trying conditionaltablep" start <- getOffset string "if" sep <- lift $ satisfy (\c -> not (isAlphaNum c || isSpace c)) fields <- journalfieldnamep `sepBy1` (char sep) newline body <- flip manyTill (lift eolof) $ do off <- getOffset m <- matcherp' $ void $ char sep vs <- T.split (==sep) . T.pack <$> lift restofline if (length vs /= length fields) then customFailure $ parseErrorAt off $ ((printf "line of conditional table should have %d values, but this one has only %d" (length fields) (length vs)) :: String) else return (m,vs) when (null body) $ customFailure $ parseErrorAt start $ "start of conditional table found, but no assignment rules afterward" return $ flip map body $ \(m,vs) -> CB{cbMatchers=[m], cbAssignments=zip fields vs} "conditional table" -- A single matcher, on one line. matcherp' :: CsvRulesParser () -> CsvRulesParser Matcher matcherp' end = try (fieldmatcherp end) <|> recordmatcherp end matcherp :: CsvRulesParser Matcher matcherp = matcherp' (lift eolof) -- A single whole-record matcher. -- A pattern on the whole line, not beginning with a csv field reference. recordmatcherp :: CsvRulesParser () -> CsvRulesParser Matcher recordmatcherp end = do lift $ dbgparse 8 "trying recordmatcherp" -- pos <- currentPos -- _ <- optional (matchoperatorp >> lift skipNonNewlineSpaces >> optional newline) p <- matcherprefixp r <- regexp end return $ RecordMatcher p r -- when (null ps) $ -- Fail.fail "start of record matcher found, but no patterns afterward\n(patterns should not be indented)" "record matcher" -- | A single matcher for a specific field. A csv field reference -- (like %date or %1), and a pattern on the rest of the line, -- optionally space-separated. Eg: -- %description chez jacques fieldmatcherp :: CsvRulesParser () -> CsvRulesParser Matcher fieldmatcherp end = do lift $ dbgparse 8 "trying fieldmatcher" -- An optional fieldname (default: "all") -- f <- fromMaybe "all" `fmap` (optional $ do -- f' <- fieldnamep -- lift skipNonNewlineSpaces -- return f') p <- matcherprefixp f <- csvfieldreferencep <* lift skipNonNewlineSpaces -- optional operator.. 
just ~ (case insensitive infix regex) for now -- _op <- fromMaybe "~" <$> optional matchoperatorp lift skipNonNewlineSpaces r <- regexp end return $ FieldMatcher p f r "field matcher" matcherprefixp :: CsvRulesParser MatcherPrefix matcherprefixp = do lift $ dbgparse 8 "trying matcherprefixp" (char '&' >> lift skipNonNewlineSpaces >> return And) <|> return None csvfieldreferencep :: CsvRulesParser CsvFieldReference csvfieldreferencep = do lift $ dbgparse 8 "trying csvfieldreferencep" char '%' T.cons '%' . textQuoteIfNeeded <$> fieldnamep -- A single regular expression regexp :: CsvRulesParser () -> CsvRulesParser Regexp regexp end = do lift $ dbgparse 8 "trying regexp" -- notFollowedBy matchoperatorp c <- lift nonspace cs <- anySingle `manyTill` end case toRegexCI . T.strip . T.pack $ c:cs of Left x -> Fail.fail $ "CSV parser: " ++ x Right x -> return x -- -- A match operator, indicating the type of match to perform. -- -- Currently just ~ meaning case insensitive infix regex match. -- matchoperatorp :: CsvRulesParser String -- matchoperatorp = fmap T.unpack $ choiceInState $ map string -- ["~" -- -- ,"!~" -- -- ,"=" -- -- ,"!=" -- ] _RULES_LOOKUP__________________________________________ = undefined getDirective :: DirectiveName -> CsvRules -> Maybe FieldTemplate getDirective directivename = lookup directivename . rdirectives -- | Look up the value (template) of a csv rule by rule keyword. csvRule :: CsvRules -> DirectiveName -> Maybe FieldTemplate csvRule rules = (`getDirective` rules) -- | Look up the value template assigned to a hledger field by field -- list/field assignment rules, taking into account the current record and -- conditional rules. hledgerField :: CsvRules -> CsvRecord -> HledgerFieldName -> Maybe FieldTemplate hledgerField = getEffectiveAssignment -- | Look up the final value assigned to a hledger field, with csv field -- references interpolated. hledgerFieldValue :: CsvRules -> CsvRecord -> HledgerFieldName -> Maybe Text hledgerFieldValue rules record = fmap (renderTemplate rules record) . hledgerField rules record -- | Given the conversion rules, a CSV record and a hledger field name, find -- the value template ultimately assigned to this field, if any, by a field -- assignment at top level or in a conditional block matching this record. -- -- Note conditional blocks' patterns are matched against an approximation of the -- CSV record: all the field values, without enclosing quotes, comma-separated. -- getEffectiveAssignment :: CsvRules -> CsvRecord -> HledgerFieldName -> Maybe FieldTemplate getEffectiveAssignment rules record f = lastMay $ map snd $ assignments where -- all active assignments to field f, in order assignments = dbg9 "csv assignments" $ filter ((==f).fst) $ toplevelassignments ++ conditionalassignments where -- all top level field assignments toplevelassignments = rassignments rules -- all field assignments in conditional blocks assigning to field f and active for the current csv record conditionalassignments = concatMap cbAssignments $ filter isBlockActive $ (rblocksassigning rules) f where -- does this conditional block match the current csv record ? isBlockActive :: ConditionalBlock -> Bool isBlockActive CB{..} = any (all matcherMatches) $ groupedMatchers cbMatchers where -- does this individual matcher match the current csv record ? matcherMatches :: Matcher -> Bool matcherMatches (RecordMatcher _ pat) = regexMatchText pat' wholecsvline where pat' = dbg7 "regex" pat -- A synthetic whole CSV record to match against. 
Note, this can be -- different from the original CSV data: -- - any whitespace surrounding field values is preserved -- - any quotes enclosing field values are removed -- - and the field separator is always comma -- which means that a field containing a comma will look like two fields. wholecsvline = dbg7 "wholecsvline" $ T.intercalate "," record matcherMatches (FieldMatcher _ csvfieldref pat) = regexMatchText pat csvfieldvalue where -- the value of the referenced CSV field to match against. csvfieldvalue = dbg7 "csvfieldvalue" $ replaceCsvFieldReference rules record csvfieldref -- | Group matchers into associative pairs based on prefix, e.g.: -- A -- & B -- C -- D -- & E -- => [[A, B], [C], [D, E]] groupedMatchers :: [Matcher] -> [[Matcher]] groupedMatchers [] = [] groupedMatchers (x:xs) = (x:ys) : groupedMatchers zs where (ys, zs) = span (\y -> matcherPrefix y == And) xs matcherPrefix :: Matcher -> MatcherPrefix matcherPrefix (RecordMatcher prefix _) = prefix matcherPrefix (FieldMatcher prefix _ _) = prefix -- | Render a field assignment's template, possibly interpolating referenced -- CSV field values. Outer whitespace is removed from interpolated values. renderTemplate :: CsvRules -> CsvRecord -> FieldTemplate -> Text renderTemplate rules record t = maybe t mconcat $ parseMaybe (many $ takeWhile1P Nothing (/='%') <|> replaceCsvFieldReference rules record <$> referencep) t where referencep = liftA2 T.cons (char '%') (takeWhile1P (Just "reference") isFieldNameChar) :: Parsec HledgerParseErrorData Text Text isFieldNameChar c = isAlphaNum c || c == '_' || c == '-' -- | Replace something that looks like a reference to a csv field ("%date" or "%1) -- with that field's value. If it doesn't look like a field reference, or if we -- can't find such a field, replace it with the empty string. replaceCsvFieldReference :: CsvRules -> CsvRecord -> CsvFieldReference -> Text replaceCsvFieldReference rules record s = case T.uncons s of Just ('%', fieldname) -> fromMaybe "" $ csvFieldValue rules record fieldname _ -> s -- | Get the (whitespace-stripped) value of a CSV field, identified by its name or -- column number, ("date" or "1"), from the given CSV record, if such a field exists. csvFieldValue :: CsvRules -> CsvRecord -> CsvFieldName -> Maybe Text csvFieldValue rules record fieldname = do fieldindex <- if T.all isDigit fieldname then readMay $ T.unpack fieldname else lookup (T.toLower fieldname) $ rcsvfieldindexes rules T.strip <$> atMay record (fieldindex-1) _CSV_READING__________________________________________ = undefined -- | Read a Journal from the given CSV data (and filename, used for error -- messages), or return an error. Proceed as follows: -- -- 1. Conversion rules are provided, or they are parsed from the specified -- rules file, or from the default rules file for the CSV data file. -- If rules parsing fails, or the required rules file does not exist, throw an error. -- -- 2. Parse the CSV data using the rules, or throw an error. -- -- 3. Convert the CSV records to hledger transactions using the rules. -- -- 4. Return the transactions as a Journal. 
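-- A usage sketch (hypothetical caller and file names, for illustration only):
--
--   ej <- runExceptT $ readJournalFromCsv (Just $ Right "bank.csv.rules") "bank.csv" csvtext
--   case ej of
--     Left e  -> putStrLn e
--     Right j -> mapM_ print (jtxns j)
--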
-- readJournalFromCsv :: Maybe (Either CsvRules FilePath) -> FilePath -> Text -> ExceptT String IO Journal readJournalFromCsv Nothing "-" _ = throwError "please use --rules-file when reading CSV from stdin" readJournalFromCsv merulesfile csvfile csvtext = do -- for now, correctness is the priority here, efficiency not so much rules <- case merulesfile of Just (Left rs) -> return rs Just (Right rulesfile) -> readRulesFile rulesfile Nothing -> readRulesFile $ rulesFileFor csvfile dbg6IO "csv rules" rules -- convert the csv data to lines and remove all empty/blank lines let csvlines1 = dbg9 "csvlines1" $ filter (not . T.null . T.strip) $ dbg9 "csvlines0" $ T.lines csvtext -- if there is a top-level skip rule, skip the specified number of non-empty lines skiplines <- case getDirective "skip" rules of Nothing -> return 0 Just "" -> return 1 Just s -> maybe (throwError $ "could not parse skip value: " ++ show s) return . readMay $ T.unpack s let csvlines2 = dbg9 "csvlines2" $ drop skiplines csvlines1 -- convert back to text and parse as csv records let csvtext1 = T.unlines csvlines2 separator = case getDirective "separator" rules >>= parseSeparator of Just c -> c _ | ext == "ssv" -> ';' _ | ext == "tsv" -> '\t' _ -> ',' where ext = map toLower $ drop 1 $ takeExtension csvfile -- parsec seemed to fail if you pass it "-" here -- TODO: try again with megaparsec parsecfilename = if csvfile == "-" then "(stdin)" else csvfile dbg6IO "using separator" separator -- parse csv records csvrecords0 <- dbg7 "parseCsv" <$> parseCsv separator parsecfilename csvtext1 -- remove any records skipped by conditional skip or end rules let csvrecords1 = applyConditionalSkips rules csvrecords0 -- and check the remaining records for any obvious problems csvrecords <- liftEither $ dbg7 "validateCsv" <$> validateCsv csvrecords1 dbg6IO "first 3 csv records" $ take 3 csvrecords -- XXX identify header lines some day ? -- let (headerlines, datalines) = identifyHeaderLines csvrecords' -- mfieldnames = lastMay headerlines tzout <- liftIO getCurrentTimeZone mtzin <- case getDirective "timezone" rules of Nothing -> return Nothing Just s -> maybe (throwError $ "could not parse time zone: " ++ T.unpack s) (return.Just) $ parseTimeM False defaultTimeLocale "%Z" $ T.unpack s let -- convert CSV records to transactions, saving the CSV line numbers for error positions txns = dbg7 "csv txns" $ snd $ mapAccumL (\pos r -> let SourcePos name line col = pos line' = (mkPos . (+1) . unPos) line pos' = SourcePos name line' col in (pos', transactionFromCsvRecord timesarezoned mtzin tzout pos rules r) ) (initialPos parsecfilename) csvrecords where timesarezoned = case csvRule rules "date-format" of Just f | any (`T.isInfixOf` f) ["%Z","%z","%EZ","%Ez"] -> True _ -> False -- Do our best to ensure transactions will be ordered chronologically, -- from oldest to newest. This is done in several steps: -- 1. Intra-day order: if there's an "intra-day-reversed" rule, -- assume each day's CSV records were ordered in reverse of the overall date order, -- so reverse each day's txns. intradayreversed = dbg6 "intra-day-reversed" $ isJust $ getDirective "intra-day-reversed" rules txns1 = dbg7 "txns1" $ (if intradayreversed then concatMap reverse . groupOn tdate else id) txns -- 2. Overall date order: now if there's a "newest-first" rule, -- or if there's multiple dates and the first is more recent than the last, -- assume CSV records were ordered newest dates first, -- so reverse all txns. 
newestfirst = dbg6 "newest-first" $ isJust $ getDirective "newest-first" rules mdatalooksnewestfirst = dbg6 "mdatalooksnewestfirst" $ case nub $ map tdate txns of ds | length ds > 1 -> Just $ head ds > last ds _ -> Nothing txns2 = dbg7 "txns2" $ (if newestfirst || mdatalooksnewestfirst == Just True then reverse else id) txns1 -- 3. Disordered dates: in case the CSV records were ordered by chaos, -- do a final sort by date. If it was only a few records out of order, -- this will hopefully refine any good ordering done by steps 1 and 2. txns3 = dbg7 "date-sorted csv txns" $ sortOn tdate txns2 return nulljournal{jtxns=txns3} -- | Parse special separator names TAB and SPACE, or return the first -- character. Return Nothing on empty string parseSeparator :: Text -> Maybe Char parseSeparator = specials . T.toLower where specials "space" = Just ' ' specials "tab" = Just '\t' specials xs = fst <$> T.uncons xs -- Call parseCassava on a file or stdin, converting the result to ExceptT. parseCsv :: Char -> FilePath -> Text -> ExceptT String IO [CsvRecord] parseCsv separator filePath csvtext = ExceptT $ case filePath of "-" -> parseCassava separator "(stdin)" <$> T.getContents _ -> return $ if T.null csvtext then Right mempty else parseCassava separator filePath csvtext -- Parse text into CSV records, using Cassava and the given field separator. parseCassava :: Char -> FilePath -> Text -> Either String [CsvRecord] parseCassava separator path content = -- XXX we now remove all blank lines before parsing; will Cassava will still produce [""] records ? -- filter (/=[""]) either (Left . errorBundlePretty) (Right . parseResultToCsv) <$> CassavaMegaparsec.decodeWith decodeOptions Cassava.NoHeader path $ BL.fromStrict $ T.encodeUtf8 content where decodeOptions = Cassava.defaultDecodeOptions { Cassava.decDelimiter = fromIntegral (ord separator) } parseResultToCsv :: (Foldable t, Functor t) => t (t B.ByteString) -> [CsvRecord] parseResultToCsv = toListList . unpackFields where toListList = toList . fmap toList unpackFields = (fmap . fmap) T.decodeUtf8 -- | Scan for csv records where a conditional `skip` or `end` rule applies, -- and apply that rule, removing one or more following records. applyConditionalSkips :: CsvRules -> [CsvRecord] -> [CsvRecord] applyConditionalSkips _ [] = [] applyConditionalSkips rules (r:rest) = case skipnum r of Nothing -> r : applyConditionalSkips rules rest Just cnt -> applyConditionalSkips rules $ drop (cnt-1) rest where skipnum r1 = case (getEffectiveAssignment rules r1 "end", getEffectiveAssignment rules r1 "skip") of (Nothing, Nothing) -> Nothing (Just _, _) -> Just maxBound (Nothing, Just "") -> Just 1 (Nothing, Just x) -> Just (read $ T.unpack x) -- | Do some validation on the parsed CSV records: -- check that they all have at least two fields. validateCsv :: [CsvRecord] -> Either String [CsvRecord] validateCsv [] = Right [] validateCsv rs@(_first:_) = case lessthan2 of Just r -> Left $ printf "CSV record %s has less than two fields" (show r) Nothing -> Right rs where lessthan2 = headMay $ filter ((<2).length) rs -- -- | The highest (0-based) field index referenced in the field -- -- definitions, or -1 if no fields are defined. 
-- maxFieldIndex :: CsvRules -> Int -- maxFieldIndex r = maximumDef (-1) $ catMaybes [ -- dateField r -- ,statusField r -- ,codeField r -- ,amountField r -- ,amountInField r -- ,amountOutField r -- ,currencyField r -- ,accountField r -- ,account2Field r -- ,date2Field r -- ] --- ** converting csv records to transactions transactionFromCsvRecord :: Bool -> Maybe TimeZone -> TimeZone -> SourcePos -> CsvRules -> CsvRecord -> Transaction transactionFromCsvRecord timesarezoned mtzin tzout sourcepos rules record = t where ---------------------------------------------------------------------- -- 1. Define some helpers: rule = csvRule rules :: DirectiveName -> Maybe FieldTemplate -- ruleval = csvRuleValue rules record :: DirectiveName -> Maybe String field = hledgerField rules record :: HledgerFieldName -> Maybe FieldTemplate fieldval = hledgerFieldValue rules record :: HledgerFieldName -> Maybe Text mdateformat = rule "date-format" parsedate = parseDateWithCustomOrDefaultFormats timesarezoned mtzin tzout mdateformat mkdateerror datefield datevalue mdateformat' = T.unpack $ T.unlines ["error: could not parse \""<>datevalue<>"\" as a date using date format " <>maybe "\"YYYY/M/D\", \"YYYY-M-D\" or \"YYYY.M.D\"" (T.pack . show) mdateformat' ,showRecord record ,"the "<>datefield<>" rule is: "<>(fromMaybe "required, but missing" $ field datefield) ,"the date-format is: "<>fromMaybe "unspecified" mdateformat' ,"you may need to " <>"change your "<>datefield<>" rule, " <>maybe "add a" (const "change your") mdateformat'<>" date-format rule, " <>"or "<>maybe "add a" (const "change your") mskip<>" skip rule" ,"for m/d/y or d/m/y dates, use date-format %-m/%-d/%Y or date-format %-d/%-m/%Y" ] where mskip = rule "skip" ---------------------------------------------------------------------- -- 2. Gather values needed for the transaction itself, by evaluating the -- field assignment rules using the CSV record's data, and parsing a bit -- more where needed (dates, status). date = fromMaybe "" $ fieldval "date" -- PARTIAL: date' = fromMaybe (error' $ mkdateerror "date" date mdateformat) $ parsedate date mdate2 = fieldval "date2" mdate2' = (maybe (error' $ mkdateerror "date2" (fromMaybe "" mdate2) mdateformat) Just . parsedate) =<< mdate2 status = case fieldval "status" of Nothing -> Unmarked Just s -> either statuserror id $ runParser (statusp <* eof) "" s where statuserror err = error' . T.unpack $ T.unlines ["error: could not parse \""<>s<>"\" as a cleared status (should be *, ! or empty)" ,"the parse error is: "<>T.pack (customErrorBundlePretty err) ] code = maybe "" singleline' $ fieldval "code" description = maybe "" singleline' $ fieldval "description" comment = maybe "" unescapeNewlines $ fieldval "comment" precomment = maybe "" unescapeNewlines $ fieldval "precomment" singleline' = T.unwords . filter (not . T.null) . map T.strip . T.lines unescapeNewlines = T.intercalate "\n" . T.splitOn "\\n" ---------------------------------------------------------------------- -- 3. 
Generate the postings for which an account has been assigned -- (possibly indirectly due to an amount or balance assignment) p1IsVirtual = (accountNamePostingType <$> fieldval "account1") == Just VirtualPosting ps = [p | n <- [1..maxpostings] ,let cmt = maybe "" unescapeNewlines $ fieldval ("comment"<> T.pack (show n)) ,let currency = fromMaybe "" (fieldval ("currency"<> T.pack (show n)) <|> fieldval "currency") ,let mamount = getAmount rules record currency p1IsVirtual n ,let mbalance = getBalance rules record currency n ,Just (acct,isfinal) <- [getAccount rules record mamount mbalance n] -- skips Nothings ,let acct' | not isfinal && acct==unknownExpenseAccount && fromMaybe False (mamount >>= isNegativeMixedAmount) = unknownIncomeAccount | otherwise = acct ,let p = nullposting{paccount = accountNameWithoutPostingType acct' ,pamount = fromMaybe missingmixedamt mamount ,ptransaction = Just t ,pbalanceassertion = mkBalanceAssertion rules record <$> mbalance ,pcomment = cmt ,ptype = accountNamePostingType acct } ] ---------------------------------------------------------------------- -- 4. Build the transaction (and name it, so the postings can reference it). t = nulltransaction{ tsourcepos = (sourcepos, sourcepos) -- the CSV line number ,tdate = date' ,tdate2 = mdate2' ,tstatus = status ,tcode = code ,tdescription = description ,tcomment = comment ,tprecedingcomment = precomment ,tpostings = ps } -- | Parse the date string using the specified date-format, or if unspecified -- the "simple date" formats (YYYY/MM/DD, YYYY-MM-DD, YYYY.MM.DD, leading -- zeroes optional). If a timezone is provided, we assume the DateFormat -- produces a zoned time and we localise that to the given timezone. parseDateWithCustomOrDefaultFormats :: Bool -> Maybe TimeZone -> TimeZone -> Maybe DateFormat -> Text -> Maybe Day parseDateWithCustomOrDefaultFormats timesarezoned mtzin tzout mformat s = localdate <$> mutctime -- this time code can probably be simpler, I'm just happy to get out alive where localdate :: UTCTime -> Day = localDay . dbg7 ("time in output timezone "++show tzout) . utcToLocalTime tzout mutctime :: Maybe UTCTime = asum $ map parseWithFormat formats parseWithFormat :: String -> Maybe UTCTime parseWithFormat fmt = if timesarezoned then dbg7 "zoned CSV time, expressed as UTC" $ parseTimeM True defaultTimeLocale fmt $ T.unpack s :: Maybe UTCTime else -- parse as a local day and time; then if an input timezone is provided, -- assume it's in that, otherwise assume it's in the output timezone; -- then convert to UTC like the above let mlocaltime = fmap (dbg7 "unzoned CSV time") $ parseTimeM True defaultTimeLocale fmt $ T.unpack s :: Maybe LocalTime localTimeAsZonedTime tz lt = ZonedTime lt tz in case mtzin of Just tzin -> (dbg7 ("unzoned CSV time, declared as "++show tzin++ ", expressed as UTC") . localTimeToUTC tzin) <$> mlocaltime Nothing -> (dbg7 ("unzoned CSV time, treated as "++show tzout++ ", expressed as UTC") . zonedTimeToUTC . localTimeAsZonedTime tzout) <$> mlocaltime formats = map T.unpack $ maybe ["%Y/%-m/%-d" ,"%Y-%-m-%-d" ,"%Y.%-m.%-d" -- ,"%-m/%-d/%Y" -- ,parseTimeM TruedefaultTimeLocale "%Y/%m/%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTimeM TruedefaultTimeLocale "%Y-%m-%e" (take 5 s ++ "0" ++ drop 5 s) -- ,parseTimeM TruedefaultTimeLocale "%m/%e/%Y" ('0':s) -- ,parseTimeM TruedefaultTimeLocale "%m-%e-%Y" ('0':s) ] (:[]) mformat -- | Figure out the amount specified for posting N, if any. 
-- A currency symbol to prepend to the amount, if any, is provided, -- and whether posting 1 requires balancing or not. -- This looks for a non-empty amount value assigned to "amountN", "amountN-in", or "amountN-out". -- For postings 1 or 2 it also looks at "amount", "amount-in", "amount-out". -- If more than one of these has a value, it looks for one that is non-zero. -- If there's multiple non-zeros, or no non-zeros but multiple zeros, it throws an error. getAmount :: CsvRules -> CsvRecord -> Text -> Bool -> Int -> Maybe MixedAmount getAmount rules record currency p1IsVirtual n = -- Warning! Many tricky corner cases here. -- Keep synced with: -- hledger_csv.m4.md -> CSV FORMAT -> "amount", "Setting amounts", -- hledger/test/csv.test -> 13, 31-34 let unnumberedfieldnames = ["amount","amount-in","amount-out"] -- amount field names which can affect this posting fieldnames = map (("amount"<> T.pack (show n))<>) ["","-in","-out"] -- For posting 1, also recognise the old amount/amount-in/amount-out names. -- For posting 2, the same but only if posting 1 needs balancing. ++ if n==1 || n==2 && not p1IsVirtual then unnumberedfieldnames else [] -- assignments to any of these field names with non-empty values assignments = [(f,a') | f <- fieldnames , Just v <- [T.strip . renderTemplate rules record <$> hledgerField rules record f] , not $ T.null v -- XXX maybe ignore rule-generated values like "", "-", "$", "-$", "$-" ? cf CSV FORMAT -> "amount", "Setting amounts", , let a = parseAmount rules record currency v -- With amount/amount-in/amount-out, in posting 2, -- flip the sign and convert to cost, as they did before 1.17 , let a' = if f `elem` unnumberedfieldnames && n==2 then mixedAmountCost (maNegate a) else a ] -- if any of the numbered field names are present, discard all the unnumbered ones discardUnnumbered xs = if null numbered then xs else numbered where numbered = filter (T.any isDigit . fst) xs -- discard all zero amounts, unless all amounts are zero, in which case discard all but the first discardExcessZeros xs = if null nonzeros then take 1 xs else nonzeros where nonzeros = filter (not . mixedAmountLooksZero . snd) xs -- for -out fields, flip the sign XXX unless it's already negative ? back compat issues / too confusing ? negateIfOut f = if "-out" `T.isSuffixOf` f then maNegate else id in case discardExcessZeros $ discardUnnumbered assignments of [] -> Nothing [(f,a)] -> Just $ negateIfOut f a fs -> error' . T.unpack . textChomp . 
T.unlines $ -- PARTIAL:
["in CSV rules:" ,"While processing " <> showRecord record ,"while calculating amount for posting " <> T.pack (show n) ] ++ ["rule \"" <> f <> " " <> fromMaybe "" (hledgerField rules record f) <> "\" assigned value \"" <> wbToText (showMixedAmountB noColour a) <> "\"" -- XXX not sure this is showing all the right info | (f,a) <- fs ] ++ ["" ,"Multiple non-zero amounts were assigned for an amount field." ,"Please ensure just one non-zero amount is assigned, perhaps with an if rule." ,"See also: https://hledger.org/hledger.html#setting-amounts" ,"(hledger manual -> CSV format -> Tips -> Setting amounts)" ] -- | Figure out the expected balance (assertion or assignment) specified for posting N, -- if any (and its parse position).
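-- For example (an illustrative sketch): with a "balance1 %3" rule and a CSV record whose
-- third field is "1234.00", this would return Just (that amount parsed as in journal
-- format, with any currency prefix applied, and a placeholder parse position); with no
-- balance rule for this posting, or one that produces "", it returns Nothing.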
getBalance :: CsvRules -> CsvRecord -> Text -> Int -> Maybe (Amount, SourcePos) getBalance rules record currency n = do v <- (fieldval ("balance"<> T.pack (show n)) -- for posting 1, also recognise the old field name <|> if n==1 then fieldval "balance" else Nothing) case v of "" -> Nothing s -> Just ( parseBalanceAmount rules record currency n s ,initialPos "" -- parse position to show when assertion fails, ) -- XXX the csv record's line number would be good where fieldval = fmap T.strip . hledgerFieldValue rules record :: HledgerFieldName -> Maybe Text -- | Given a non-empty amount string (from CSV) to parse, along with a -- possibly non-empty currency symbol to prepend, -- parse as a hledger MixedAmount (as in journal format), or raise an error. -- The whole CSV record is provided for the error message. parseAmount :: CsvRules -> CsvRecord -> Text -> Text -> MixedAmount parseAmount rules record currency s = either mkerror mixedAmount $ -- PARTIAL:
runParser (evalStateT (amountp <* eof) journalparsestate) "" $ currency <> simplifySign s where journalparsestate = nulljournal{jparsedecimalmark=parseDecimalMark rules} mkerror e = error' . T.unpack $ T.unlines ["error: could not parse \"" <> s <> "\" as an amount" ,showRecord record ,showRules rules record -- ,"the default-currency is: "++fromMaybe "unspecified" (getDirective "default-currency" rules) ,"the parse error is: " <> T.pack (customErrorBundlePretty e) ,"you may need to \ \change your amount*, balance*, or currency* rules, \ \or add or change your skip rule" ] -- | Show the values assigned to each journal field. showRules rules record = T.unlines $ catMaybes [ (("the "<>fld<>" rule is: ")<>) <$> getEffectiveAssignment rules record fld | fld <- journalfieldnames ] -- | Show a (approximate) recreation of the original CSV record. showRecord :: CsvRecord -> Text showRecord r = "CSV record: "<>T.intercalate "," (map (wrap "\"" "\"") r) -- XXX unify these ^v -- | Almost but not quite the same as parseAmount. -- Given a non-empty amount string (from CSV) to parse, along with a -- possibly non-empty currency symbol to prepend, -- parse as a hledger Amount (as in journal format), or raise an error. -- The CSV record and the field's numeric suffix are provided for the error message. parseBalanceAmount :: CsvRules -> CsvRecord -> Text -> Int -> Text -> Amount parseBalanceAmount rules record currency n s = either (mkerror n s) id $ runParser (evalStateT (amountp <* eof) journalparsestate) "" $ currency <> simplifySign s -- the csv record's line number would be good where journalparsestate = nulljournal{jparsedecimalmark=parseDecimalMark rules} mkerror n' s' e = error' . T.unpack $ T.unlines ["error: could not parse \"" <> s' <> "\" as balance"<> T.pack (show n') <> " amount" ,showRecord record ,showRules rules record -- ,"the default-currency is: "++fromMaybe "unspecified" mdefaultcurrency ,"the parse error is: "<> T.pack (customErrorBundlePretty e) ] -- Read a valid decimal mark from the decimal-mark rule, if any. -- If the rule is present with an invalid argument, raise an error. parseDecimalMark :: CsvRules -> Maybe DecimalMark parseDecimalMark rules = do s <- rules `csvRule` "decimal-mark" case T.uncons s of Just (c, rest) | T.null rest && isDecimalMark c -> return c _ -> error' . T.unpack $ "decimal-mark's argument should be \".\" or \",\" (not \""<>s<>"\")" -- | Make a balance assertion for the given amount, with the given parse -- position (to be shown in assertion failures), with the assertion type -- possibly set by a balance-type rule. -- The CSV rules and current record are also provided, to be shown in case -- balance-type's argument is bad (XXX refactor). mkBalanceAssertion :: CsvRules -> CsvRecord -> (Amount, SourcePos) -> BalanceAssertion mkBalanceAssertion rules record (amt, pos) = assrt{baamount=amt, baposition=pos} where assrt = case getDirective "balance-type" rules of Nothing -> nullassertion Just "=" -> nullassertion Just "==" -> nullassertion{batotal=True} Just "=*" -> nullassertion{bainclusive=True} Just "==*" -> nullassertion{batotal=True, bainclusive=True} Just x -> error' . T.unpack $ T.unlines -- PARTIAL: [ "balance-type \"" <> x <>"\" is invalid. Use =, ==, =* or ==*." , showRecord record , showRules rules record ] -- | Figure out the account name specified for posting N, if any. -- And whether it is the default unknown account (which may be -- improved later) or an explicitly set account (which may not). 
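-- For example (an illustrative sketch): if the rules assign "expenses:food" to account2,
-- this returns Just ("expenses:food", True) (explicitly set, so final); if account2 is
-- unset but an amount or balance was assigned for posting 2, it returns
-- Just ("expenses:unknown", False) (a default which may be improved later); if account2
-- is assigned the empty string, or nothing at all is assigned for this posting, it
-- returns Nothing and no posting is generated.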
getAccount :: CsvRules -> CsvRecord -> Maybe MixedAmount -> Maybe (Amount, SourcePos) -> Int -> Maybe (AccountName, Bool) getAccount rules record mamount mbalance n = let fieldval = hledgerFieldValue rules record :: HledgerFieldName -> Maybe Text maccount = T.strip <$> fieldval ("account"<> T.pack (show n)) in case maccount of -- accountN is set to the empty string - no posting will be generated Just "" -> Nothing -- accountN is set (possibly to "expenses:unknown"! #1192) - mark it final Just a -> -- Check it and reject if invalid.. sometimes people try -- to set an amount or comment along with the account name. case parsewith (accountnamep >> eof) a of Left e -> usageError $ errorBundlePretty e Right _ -> Just (a, True) -- accountN is unset Nothing -> case (mamount, mbalance) of -- amountN is set, or implied by balanceN - set accountN to -- the default unknown account ("expenses:unknown") and -- allow it to be improved later (Just _, _) -> Just (unknownExpenseAccount, False) (_, Just _) -> Just (unknownExpenseAccount, False) -- amountN is also unset - no posting will be generated (Nothing, Nothing) -> Nothing -- | Default account names to use when needed. unknownExpenseAccount = "expenses:unknown" unknownIncomeAccount = "income:unknown" type CsvAmountString = Text -- | Canonicalise the sign in a CSV amount string. -- Such strings can have a minus sign, parentheses (equivalent to minus), -- or any two of these (which cancel out), -- or a plus sign (which is removed), -- or any sign by itself with no following number (which is removed). -- See hledger > CSV FORMAT > Tips > Setting amounts. -- -- These are supported (note, not every possibile combination): -- -- >>> simplifySign "1" -- "1" -- >>> simplifySign "+1" -- "1" -- >>> simplifySign "-1" -- "-1" -- >>> simplifySign "(1)" -- "-1" -- >>> simplifySign "--1" -- "1" -- >>> simplifySign "-(1)" -- "1" -- >>> simplifySign "-+1" -- "-1" -- >>> simplifySign "(-1)" -- "1" -- >>> simplifySign "((1))" -- "1" -- >>> simplifySign "-" -- "" -- >>> simplifySign "()" -- "" -- >>> simplifySign "+" -- "" simplifySign :: CsvAmountString -> CsvAmountString simplifySign amtstr | Just (' ',t) <- T.uncons amtstr = simplifySign t | Just (t,' ') <- T.unsnoc amtstr = simplifySign t | Just ('(',t) <- T.uncons amtstr, Just (amt,')') <- T.unsnoc t = simplifySign $ negateStr amt | Just ('-',b) <- T.uncons amtstr, Just ('(',t) <- T.uncons b, Just (amt,')') <- T.unsnoc t = simplifySign amt | Just ('-',m) <- T.uncons amtstr, Just ('-',amt) <- T.uncons m = amt | Just ('-',m) <- T.uncons amtstr, Just ('+',amt) <- T.uncons m = negateStr amt | amtstr `elem` ["-","+","()"] = "" | Just ('+',amt) <- T.uncons amtstr = simplifySign amt | otherwise = amtstr negateStr :: Text -> Text negateStr amtstr = case T.uncons amtstr of Just ('-',s) -> s _ -> T.cons '-' amtstr --- ** tests _TESTS__________________________________________ = undefined tests_RulesReader = testGroup "RulesReader" [ testGroup "parseCsvRules" [ testCase "empty file" $ parseCsvRules "unknown" "" @?= Right (mkrules defrules) ] ,testGroup "rulesp" [ testCase "trailing comments" $ parseWithState' defrules rulesp "skip\n# \n#\n" @?= Right (mkrules $ defrules{rdirectives = [("skip","")]}) ,testCase "trailing blank lines" $ parseWithState' defrules rulesp "skip\n\n \n" @?= (Right (mkrules $ defrules{rdirectives = [("skip","")]})) ,testCase "no final newline" $ parseWithState' defrules rulesp "skip" @?= (Right (mkrules $ defrules{rdirectives=[("skip","")]})) ,testCase "assignment with empty value" $ parseWithState' 
defrules rulesp "account1 \nif foo\n account2 foo\n" @?= (Right (mkrules $ defrules{rassignments = [("account1","")], rconditionalblocks = [CB{cbMatchers=[RecordMatcher None (toRegex' "foo")],cbAssignments=[("account2","foo")]}]})) ] ,testGroup "conditionalblockp" [ testCase "space after conditional" $ -- #1120
parseWithState' defrules conditionalblockp "if a\n account2 b\n \n" @?= (Right $ CB{cbMatchers=[RecordMatcher None $ toRegexCI' "a"],cbAssignments=[("account2","b")]}) ], testGroup "csvfieldreferencep" [ testCase "number" $ parseWithState' defrules csvfieldreferencep "%1" @?= (Right "%1") ,testCase "name" $ parseWithState' defrules csvfieldreferencep "%date" @?= (Right "%date") ,testCase "quoted name" $ parseWithState' defrules csvfieldreferencep "%\"csv date\"" @?= (Right "%\"csv date\"") ] ,testGroup "matcherp" [ testCase "recordmatcherp" $ parseWithState' defrules matcherp "A A\n" @?= (Right $ RecordMatcher None $ toRegexCI' "A A") ,testCase "recordmatcherp.starts-with-&" $ parseWithState' defrules matcherp "& A A\n" @?= (Right $ RecordMatcher And $ toRegexCI' "A A") ,testCase "fieldmatcherp.starts-with-%" $ parseWithState' defrules matcherp "description A A\n" @?= (Right $ RecordMatcher None $ toRegexCI' "description A A") ,testCase "fieldmatcherp" $ parseWithState' defrules matcherp "%description A A\n" @?= (Right $ FieldMatcher None "%description" $ toRegexCI' "A A") ,testCase "fieldmatcherp.starts-with-&" $ parseWithState' defrules matcherp "& %description A A\n" @?= (Right $ FieldMatcher And "%description" $ toRegexCI' "A A") -- ,testCase "fieldmatcherp with operator" $ -- parseWithState' defrules matcherp "%description ~ A A\n" @?= (Right $ FieldMatcher "%description" "A A") ] ,testGroup "getEffectiveAssignment" [ let rules = mkrules $ defrules {rcsvfieldindexes=[("csvdate",1)],rassignments=[("date","%csvdate")]} in testCase "toplevel" $ getEffectiveAssignment rules ["a","b"] "date" @?= (Just "%csvdate") ,let rules = mkrules $ defrules{rcsvfieldindexes=[("csvdate",1)], rconditionalblocks=[CB [FieldMatcher None "%csvdate" $ toRegex' "a"] [("date","%csvdate")]]} in testCase "conditional" $ getEffectiveAssignment rules ["a","b"] "date" @?= (Just "%csvdate") ,let rules = mkrules $ defrules{rcsvfieldindexes=[("csvdate",1),("description",2)], rconditionalblocks=[CB [FieldMatcher None "%csvdate" $ toRegex' "a", FieldMatcher None "%description" $ toRegex' "b"] [("date","%csvdate")]]} in testCase "conditional-with-or-a" $ getEffectiveAssignment rules ["a"]
"date" @?= (Just "%csvdate") ,let rules = mkrules $ defrules{rcsvfieldindexes=[("csvdate",1),("description",2)], rconditionalblocks=[CB [FieldMatcher None "%csvdate" $ toRegex' "a", FieldMatcher None "%description" $ toRegex' "b"] [("date","%csvdate")]]} in testCase "conditional-with-or-b" $ getEffectiveAssignment rules ["_", "b"] "date" @?= (Just "%csvdate") ,let rules = mkrules $ defrules{rcsvfieldindexes=[("csvdate",1),("description",2)], rconditionalblocks=[CB [FieldMatcher None "%csvdate" $ toRegex' "a", FieldMatcher And "%description" $ toRegex' "b"] [("date","%csvdate")]]} in testCase "conditional.with-and" $ getEffectiveAssignment rules ["a", "b"] "date" @?= (Just "%csvdate") ,let rules = mkrules $ defrules{rcsvfieldindexes=[("csvdate",1),("description",2)], rconditionalblocks=[CB [FieldMatcher None "%csvdate" $ toRegex' "a", FieldMatcher And "%description" $ toRegex' "b", FieldMatcher None "%description" $ toRegex' "c"] [("date","%csvdate")]]} in testCase "conditional.with-and-or" $ getEffectiveAssignment rules ["_", "c"] "date" @?= (Just "%csvdate") ] ] hledger-lib-1.30/Hledger/Read/TimedotReader.hs0000644000000000000000000001741214434445206017325 0ustar0000000000000000--- * -*- outline-regexp:"--- \\*"; -*- --- ** doc -- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections. {-| A reader for the "timedot" file format. Example: @ ;DATE ;ACCT DOTS # Each dot represents 15m, spaces are ignored ;ACCT 8 # numbers with or without a following h represent hours ;ACCT 5m # numbers followed by m represent minutes ; on 2/1, 1h was spent on FOSS haskell work, 0.25h on research, etc. 2/1 fos.haskell .... .. biz.research . inc.client1 .... .... .... .... .... .... 2/2 biz.research . inc.client1 .... .... .. @ -} --- ** language {-# LANGUAGE OverloadedStrings #-} --- ** exports module Hledger.Read.TimedotReader ( -- * Reader reader, -- * Misc other exports timedotfilep, ) where --- ** imports import Control.Monad import Control.Monad.Except (ExceptT, liftEither) import Control.Monad.State.Strict import Data.Char (isSpace) import Data.Text (Text) import qualified Data.Text as T import Data.Time (Day) import Text.Megaparsec hiding (parse) import Text.Megaparsec.Char import Hledger.Data import Hledger.Read.Common hiding (emptyorcommentlinep) import Hledger.Utils --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings --- ** reader reader :: MonadIO m => Reader m reader = Reader {rFormat = "timedot" ,rExtensions = ["timedot"] ,rReadFn = parse ,rParser = timedotp } -- | Parse and post-process a "Journal" from the timedot format, or give an error. parse :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal parse iopts fp t = initialiseAndParseJournal timedotp iopts fp t >>= liftEither . journalApplyAliases (aliasesFromOpts iopts) >>= journalFinalise iopts fp t --- ** utilities traceparse, traceparse' :: String -> TextParser m () traceparse = const $ return () traceparse' = const $ return () -- for debugging: -- traceparse s = traceParse (s++"?") -- traceparse' s = trace s $ return () --- ** parsers {- Rough grammar for timedot format: timedot: preamble day* preamble: (emptyline | commentline | orgheading)* orgheading: orgheadingprefix restofline day: dateline entry* (emptyline | commentline)* dateline: orgheadingprefix? date description? orgheadingprefix: star+ space+ description: restofline ; till semicolon? entry: orgheadingprefix? space* singlespaced (doublespace quantity?)? 
doublespace: space space+ quantity: (dot (dot | space)* | number | number unit) Date lines and item lines can begin with an org heading prefix, which is ignored. Org headings before the first date line are ignored, regardless of content. -} timedotfilep = timedotp -- XXX rename export above timedotp :: JournalParser m ParsedJournal timedotp = preamblep >> many dayp >> eof >> get preamblep :: JournalParser m () preamblep = do lift $ traceparse "preamblep" many $ notFollowedBy datelinep >> (lift $ emptyorcommentlinep "#;*") lift $ traceparse' "preamblep" -- | Parse timedot day entries to multi-posting time transactions for that day. -- @ -- 2020/2/1 optional day description -- fos.haskell .... .. -- biz.research . -- inc.client1 .... .... .... .... .... .... -- @ dayp :: JournalParser m () dayp = label "timedot day entry" $ do lift $ traceparse "dayp" pos <- getSourcePos (date,desc,comment,tags) <- datelinep commentlinesp ps <- many $ timedotentryp <* commentlinesp endpos <- getSourcePos -- lift $ traceparse' "dayp end" let t = txnTieKnot $ nulltransaction{ tsourcepos = (pos, endpos), tdate = date, tstatus = Cleared, tdescription = desc, tcomment = comment, ttags = tags, tpostings = ps } modify' $ addTransaction t datelinep :: JournalParser m (Day,Text,Text,[Tag]) datelinep = do lift $ traceparse "datelinep" lift $ optional orgheadingprefixp date <- datep desc <- T.strip <$> lift descriptionp (comment, tags) <- lift transactioncommentp -- lift $ traceparse' "datelinep end" return (date, desc, comment, tags) -- | Zero or more empty lines or hash/semicolon comment lines -- or org headlines which do not start a new day. commentlinesp :: JournalParser m () commentlinesp = do lift $ traceparse "commentlinesp" void $ many $ try $ lift $ emptyorcommentlinep "#;" -- orgnondatelinep :: JournalParser m () -- orgnondatelinep = do -- lift $ traceparse "orgnondatelinep" -- lift orgheadingprefixp -- notFollowedBy datelinep -- void $ lift restofline -- lift $ traceparse' "orgnondatelinep" orgheadingprefixp = do -- traceparse "orgheadingprefixp" skipSome (char '*') >> skipNonNewlineSpaces1 -- | Parse a single timedot entry to one (dateless) transaction. -- @ -- fos.haskell .... .. -- @ timedotentryp :: JournalParser m Posting timedotentryp = do lift $ traceparse "timedotentryp" notFollowedBy datelinep lift $ optional $ choice [orgheadingprefixp, skipNonNewlineSpaces1] a <- modifiedaccountnamep lift skipNonNewlineSpaces (hours, comment, tags) <- try (do (c,ts) <- lift transactioncommentp -- or postingp, but let's not bother supporting date:/date2: return (0, c, ts) ) <|> (do h <- lift durationp (c,ts) <- try (lift transactioncommentp) <|> (newline >> return ("",[])) return (h,c,ts) ) mcs <- getDefaultCommodityAndStyle let (c,s) = case mcs of Just (defc,defs) -> (defc, defs{asprecision=max (asprecision defs) (Precision 2)}) _ -> ("", amountstyle{asprecision=Precision 2}) -- lift $ traceparse' "timedotentryp end" return $ nullposting{paccount=a ,pamount=mixedAmount $ nullamt{acommodity=c, aquantity=hours, astyle=s} ,ptype=VirtualPosting ,pcomment=comment ,ptags=tags } type Hours = Quantity durationp :: TextParser m Hours durationp = do traceparse "durationp" try numericquantityp <|> dotquantityp -- <* traceparse' "durationp" -- | Parse a duration of seconds, minutes, hours, days, weeks, months or years, -- written as a decimal number followed by s, m, h, d, w, mo or y, assuming h -- if there is no unit. Returns the duration as hours, assuming -- 1m = 60s, 1h = 60m, 1d = 24h, 1w = 7d, 1mo = 30d, 1y=365d. 
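-- For illustration (an added, hedged example, not in the original comment):
-- with the conversions above, "90m" works out to 90 * (1/60) = 1.5 hours,
-- "2d" to 2 * 24 = 48 hours, and a bare "1.5" is taken as 1.5 hours.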
-- @ -- 1.5 -- 1.5h -- 90m -- @ numericquantityp :: TextParser m Hours numericquantityp = do -- lift $ traceparse "numericquantityp" (q, _, _, _) <- numberp Nothing msymbol <- optional $ choice $ map (string . fst) timeUnits skipNonNewlineSpaces let q' = case msymbol of Nothing -> q Just sym -> case lookup sym timeUnits of Just mult -> q * mult Nothing -> q -- shouldn't happen.. ignore return q' -- (symbol, equivalent in hours). timeUnits = [("s",2.777777777777778e-4) ,("mo",5040) -- before "m" ,("m",1.6666666666666666e-2) ,("h",1) ,("d",24) ,("w",168) ,("y",61320) ] -- | Parse a quantity written as a line of dots, each representing 0.25. -- @ -- .... .. -- @ dotquantityp :: TextParser m Quantity dotquantityp = do -- lift $ traceparse "dotquantityp" dots <- filter (not.isSpace) <$> many (oneOf (". " :: [Char])) return $ fromIntegral (length dots) / 4 -- | XXX new comment line parser, move to Hledger.Read.Common.emptyorcommentlinep -- Parse empty lines, all-blank lines, and lines beginning with any of the provided -- comment-beginning characters. emptyorcommentlinep :: [Char] -> TextParser m () emptyorcommentlinep cs = label ("empty line or comment line beginning with "++cs) $ do traceparse "emptyorcommentlinep" -- XXX possible to combine label and traceparse ? skipNonNewlineSpaces void newline <|> void commentp traceparse' "emptyorcommentlinep" where commentp = do choice (map (some.char) cs) takeWhileP Nothing (/='\n') <* newline hledger-lib-1.30/Hledger/Read/TimeclockReader.hs0000644000000000000000000001102314434445206017622 0ustar0000000000000000--- * -*- outline-regexp:"--- \\*"; -*- --- ** doc -- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections. {-| A reader for the timeclock file format generated by timeclock.el (). Example: @ i 2007\/03\/10 12:26:00 hledger o 2007\/03\/10 17:26:02 @ From timeclock.el 2.6: @ A timeclock contains data in the form of a single entry per line. Each entry has the form: CODE YYYY/MM/DD HH:MM:SS [COMMENT] CODE is one of: b, h, i, o or O. COMMENT is optional when the code is i, o or O. The meanings of the codes are: b Set the current time balance, or \"time debt\". Useful when archiving old log data, when a debt must be carried forward. The COMMENT here is the number of seconds of debt. h Set the required working time for the given day. This must be the first entry for that day. The COMMENT in this case is the number of hours in this workday. Floating point amounts are allowed. i Clock in. The COMMENT in this case should be the name of the project worked on. o Clock out. COMMENT is unnecessary, but can be used to provide a description of how the period went, for example. O Final clock out. Whatever project was being worked on, it is now finished. Useful for creating summary reports. @ -} --- ** language {-# LANGUAGE OverloadedStrings #-} --- ** exports module Hledger.Read.TimeclockReader ( -- * Reader reader, -- * Misc other exports timeclockfilep, ) where --- ** imports import Control.Monad import Control.Monad.Except (ExceptT, liftEither) import Control.Monad.State.Strict import Data.Maybe (fromMaybe) import Data.Text (Text) import Text.Megaparsec hiding (parse) import Hledger.Data -- XXX too much reuse ? 
import Hledger.Read.Common import Hledger.Utils import Data.Text as T (strip) --- ** doctest setup -- $setup -- >>> :set -XOverloadedStrings --- ** reader reader :: MonadIO m => Reader m reader = Reader {rFormat = "timeclock" ,rExtensions = ["timeclock"] ,rReadFn = parse ,rParser = timeclockfilep } -- | Parse and post-process a "Journal" from timeclock.el's timeclock -- format, saving the provided file path and the current time, or give an -- error. parse :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal parse iopts fp t = initialiseAndParseJournal timeclockfilep iopts fp t >>= liftEither . journalApplyAliases (aliasesFromOpts iopts) >>= journalFinalise iopts fp t --- ** parsers timeclockfilep :: MonadIO m => JournalParser m ParsedJournal timeclockfilep = do many timeclockitemp eof j@Journal{jparsetimeclockentries=es} <- get -- Convert timeclock entries in this journal to transactions, closing any unfinished sessions. -- Doing this here rather than in journalFinalise means timeclock sessions can't span file boundaries, -- but it simplifies code above. now <- liftIO getCurrentLocalTime -- entries have been parsed in reverse order. timeclockEntriesToTransactions -- expects them to be in normal order, then we must reverse again since -- journalFinalise expects them in reverse order let j' = j{jtxns = reverse $ timeclockEntriesToTransactions now $ reverse es, jparsetimeclockentries = []} return j' where -- As all ledger line types can be distinguished by the first -- character, excepting transactions versus empty (blank or -- comment-only) lines, can use choice w/o try timeclockitemp = choice [ void (lift emptyorcommentlinep) , timeclockentryp >>= \e -> modify' (\j -> j{jparsetimeclockentries = e : jparsetimeclockentries j}) ] "timeclock entry, comment line, or empty line" -- | Parse a timeclock entry. timeclockentryp :: JournalParser m TimeclockEntry timeclockentryp = do pos <- getSourcePos code <- oneOf ("bhioO" :: [Char]) lift skipNonNewlineSpaces1 datetime <- datetimep account <- fmap (fromMaybe "") $ optional $ lift skipNonNewlineSpaces1 >> modifiedaccountnamep description <- fmap (maybe "" T.strip) $ optional $ lift $ skipNonNewlineSpaces1 >> descriptionp (comment, tags) <- lift transactioncommentp return $ TimeclockEntry pos (read [code]) datetime account description comment tags hledger-lib-1.30/Hledger/Reports.hs0000644000000000000000000000234414434445206015356 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-| Generate several common kinds of report from a journal, as \"*Report\" - simple intermediate data structures intended to be easily rendered as text, html, json, csv etc. by hledger commands, hamlet templates, javascript, or whatever. 
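A hedged usage sketch (an added illustration, not part of the original docs;
'entriesReport' and its signature are assumed from the re-exported submodules
and may differ slightly):

@
-- assuming entriesReport :: ReportSpec -> Journal -> EntriesReport
txns = entriesReport defreportspec j
@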
-} module Hledger.Reports ( module Hledger.Reports.ReportOptions, module Hledger.Reports.ReportTypes, module Hledger.Reports.EntriesReport, module Hledger.Reports.PostingsReport, module Hledger.Reports.AccountTransactionsReport, module Hledger.Reports.BalanceReport, module Hledger.Reports.MultiBalanceReport, module Hledger.Reports.BudgetReport, -- * Tests tests_Reports ) where import Test.Tasty (testGroup) import Hledger.Reports.ReportOptions import Hledger.Reports.ReportTypes import Hledger.Reports.AccountTransactionsReport import Hledger.Reports.EntriesReport import Hledger.Reports.PostingsReport import Hledger.Reports.BalanceReport import Hledger.Reports.MultiBalanceReport import Hledger.Reports.BudgetReport tests_Reports = testGroup "Reports" [ tests_BalanceReport ,tests_BudgetReport ,tests_AccountTransactionsReport ,tests_EntriesReport ,tests_MultiBalanceReport ,tests_PostingsReport ] hledger-lib-1.30/Hledger/Reports/ReportOptions.hs0000644000000000000000000011531614436245522020212 0ustar0000000000000000{-| Options common to most hledger reports. -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TemplateHaskell #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} module Hledger.Reports.ReportOptions ( ReportOpts(..), HasReportOptsNoUpdate(..), HasReportOpts(..), ReportSpec(..), HasReportSpec(..), overEither, setEither, BalanceCalculation(..), BalanceAccumulation(..), AccountListMode(..), ValuationType(..), Layout(..), defreportopts, rawOptsToReportOpts, defreportspec, setDefaultConversionOp, reportOptsToSpec, updateReportSpec, updateReportSpecWith, rawOptsToReportSpec, balanceAccumulationOverride, flat_, tree_, reportOptsToggleStatus, simplifyStatuses, whichDate, journalValueAndFilterPostings, journalValueAndFilterPostingsWith, journalApplyValuationFromOpts, journalApplyValuationFromOptsWith, mixedAmountApplyValuationAfterSumFromOptsWith, valuationAfterSum, intervalFromRawOpts, queryFromFlags, transactionDateFn, postingDateFn, reportSpan, reportSpanBothDates, reportStartDate, reportEndDate, reportPeriodStart, reportPeriodOrJournalStart, reportPeriodLastDay, reportPeriodOrJournalLastDay, reportPeriodName ) where import Prelude hiding (Applicative(..)) import Control.Applicative (Applicative(..), Const(..), (<|>)) import Control.Monad ((<=<), guard, join) import Data.Char (toLower) import Data.Either (fromRight) import Data.Either.Extra (eitherToMaybe) import Data.Functor.Identity (Identity(..)) import Data.List.Extra (find, isPrefixOf, nubSort) import Data.Maybe (fromMaybe, isJust, isNothing) import qualified Data.Text as T import Data.Time.Calendar (Day, addDays) import Data.Default (Default(..)) import Safe (headMay, lastDef, lastMay, maximumMay, readMay) import Text.Megaparsec.Custom import Hledger.Data import Hledger.Query import Hledger.Utils -- | What to calculate for each cell in a balance report. -- "Balance report types -> Calculation type" in the hledger manual. data BalanceCalculation = CalcChange -- ^ Sum of posting amounts in the period. | CalcBudget -- ^ Sum of posting amounts and the goal for the period. | CalcValueChange -- ^ Change from previous period's historical end value to this period's historical end value. | CalcGain -- ^ Change from previous period's gain, i.e. valuation minus cost basis. | CalcPostingsCount -- ^ Number of postings in the period. 
deriving (Eq, Show) instance Default BalanceCalculation where def = CalcChange -- | How to accumulate calculated values across periods (columns) in a balance report. -- "Balance report types -> Accumulation type" in the hledger manual. data BalanceAccumulation = PerPeriod -- ^ No accumulation. Eg, shows the change of balance in each period. | Cumulative -- ^ Accumulate changes across periods, starting from zero at report start. | Historical -- ^ Accumulate changes across periods, including any from before report start. -- Eg, shows the historical end balance of each period. deriving (Eq,Show) instance Default BalanceAccumulation where def = PerPeriod -- | Should accounts be displayed: in the command's default style, hierarchically, or as a flat list ? data AccountListMode = ALFlat | ALTree deriving (Eq, Show) instance Default AccountListMode where def = ALFlat data Layout = LayoutWide (Maybe Int) | LayoutTall | LayoutBare | LayoutTidy deriving (Eq, Show) -- | Standard options for customising report filtering and output. -- Most of these correspond to standard hledger command-line options -- or query arguments, but not all. Some are used only by certain -- commands, as noted below. data ReportOpts = ReportOpts { -- for most reports: period_ :: Period ,interval_ :: Interval ,statuses_ :: [Status] -- ^ Zero, one, or two statuses to be matched ,conversionop_ :: Maybe ConversionOp -- ^ Which operation should we apply to conversion transactions? ,value_ :: Maybe ValuationType -- ^ What value should amounts be converted to ? ,infer_prices_ :: Bool -- ^ Infer market prices from transactions ? ,depth_ :: Maybe Int ,date2_ :: Bool ,empty_ :: Bool ,no_elide_ :: Bool ,real_ :: Bool ,format_ :: StringFormat ,pretty_ :: Bool ,querystring_ :: [T.Text] -- ,average_ :: Bool -- for posting reports (register) ,related_ :: Bool -- for account transactions reports (aregister) ,txn_dates_ :: Bool -- for balance reports (bal, bs, cf, is) ,balancecalc_ :: BalanceCalculation -- ^ What to calculate in balance report cells ,balanceaccum_ :: BalanceAccumulation -- ^ How to accumulate balance report values over time ,budgetpat_ :: Maybe T.Text -- ^ A case-insensitive description substring -- to select periodic transactions for budget reports. -- (Not a regexp, nor a full hledger query, for now.) ,accountlistmode_ :: AccountListMode ,drop_ :: Int ,declared_ :: Bool -- ^ Include accounts declared but not yet posted to ? ,row_total_ :: Bool ,no_total_ :: Bool ,show_costs_ :: Bool -- ^ Show costs for reports which normally don't show them ? ,sort_amount_ :: Bool ,percent_ :: Bool ,invert_ :: Bool -- ^ Flip all amount signs in reports ? ,normalbalance_ :: Maybe NormalSign -- ^ This can be set when running balance reports on a set of accounts -- with the same normal balance type (eg all assets, or all incomes). -- - It helps --sort-amount know how to sort negative numbers -- (eg in the income section of an income statement) -- - It helps compound balance report commands (is, bs etc.) do -- sign normalisation, converting normally negative subreports to -- normally positive for a more conventional display. ,color_ :: Bool -- ^ Whether to use ANSI color codes in text output. -- Influenced by the --color/colour flag (cf CliOptions), -- whether stdout is an interactive terminal, and the value of -- TERM and existence of NO_COLOR environment variables. 
,transpose_ :: Bool ,layout_ :: Layout } deriving (Show) instance Default ReportOpts where def = defreportopts defreportopts :: ReportOpts defreportopts = ReportOpts { period_ = PeriodAll , interval_ = NoInterval , statuses_ = [] , conversionop_ = Nothing , value_ = Nothing , infer_prices_ = False , depth_ = Nothing , date2_ = False , empty_ = False , no_elide_ = False , real_ = False , format_ = def , pretty_ = False , querystring_ = [] , average_ = False , related_ = False , txn_dates_ = False , balancecalc_ = def , balanceaccum_ = def , budgetpat_ = Nothing , accountlistmode_ = ALFlat , drop_ = 0 , declared_ = False , row_total_ = False , no_total_ = False , show_costs_ = False , sort_amount_ = False , percent_ = False , invert_ = False , normalbalance_ = Nothing , color_ = False , transpose_ = False , layout_ = LayoutWide Nothing } -- | Generate a ReportOpts from raw command-line input, given a day. -- This will fail with a usage error if it is passed -- - an invalid --format argument, -- - an invalid --value argument, -- - if --valuechange is called with a valuation type other than -V/--value=end. -- - an invalid --pretty argument, rawOptsToReportOpts :: Day -> RawOpts -> ReportOpts rawOptsToReportOpts d rawopts = let formatstring = T.pack <$> maybestringopt "format" rawopts querystring = map T.pack $ listofstringopt "args" rawopts -- doesn't handle an arg like "" right pretty = fromMaybe False $ alwaysneveropt "pretty" rawopts format = case parseStringFormat <$> formatstring of Nothing -> defaultBalanceLineFormat Just (Right x) -> x Just (Left err) -> usageError $ "could not parse format option: " ++ err in defreportopts {period_ = periodFromRawOpts d rawopts ,interval_ = intervalFromRawOpts rawopts ,statuses_ = statusesFromRawOpts rawopts ,conversionop_ = conversionOpFromRawOpts rawopts ,value_ = valuationTypeFromRawOpts rawopts ,infer_prices_ = boolopt "infer-market-prices" rawopts ,depth_ = maybeposintopt "depth" rawopts ,date2_ = boolopt "date2" rawopts ,empty_ = boolopt "empty" rawopts ,no_elide_ = boolopt "no-elide" rawopts ,real_ = boolopt "real" rawopts ,format_ = format ,querystring_ = querystring ,average_ = boolopt "average" rawopts ,related_ = boolopt "related" rawopts ,txn_dates_ = boolopt "txn-dates" rawopts ,balancecalc_ = balancecalcopt rawopts ,balanceaccum_ = balanceaccumopt rawopts ,budgetpat_ = maybebudgetpatternopt rawopts ,accountlistmode_ = accountlistmodeopt rawopts ,drop_ = posintopt "drop" rawopts ,declared_ = boolopt "declared" rawopts ,row_total_ = boolopt "row-total" rawopts ,no_total_ = boolopt "no-total" rawopts ,show_costs_ = boolopt "show-costs" rawopts ,sort_amount_ = boolopt "sort-amount" rawopts ,percent_ = boolopt "percent" rawopts ,invert_ = boolopt "invert" rawopts ,pretty_ = pretty ,color_ = useColorOnStdout -- a lower-level helper ,transpose_ = boolopt "transpose" rawopts ,layout_ = layoutopt rawopts } -- | A fully-determined set of report parameters -- (report options with all partial values made total, eg the begin and end -- dates are known, avoiding date/regex errors; plus the reporting date), -- and the query successfully calculated from them. -- -- If you change the report options or date in one of these, you should -- use `reportOptsToSpec` to regenerate the whole thing, avoiding inconsistency. 
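-- A hedged construction sketch (an added illustration, not from the original
-- docs), using 'reportOptsToSpec' defined below; the day is an arbitrary
-- example value and fromGregorian comes from Data.Time.Calendar:
--
-- @
-- espec :: Either String ReportSpec
-- espec = reportOptsToSpec (fromGregorian 2023 1 1)
--           defreportopts{querystring_ = ["assets"]}
-- @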
-- data ReportSpec = ReportSpec { _rsReportOpts :: ReportOpts -- ^ The underlying ReportOpts used to generate this ReportSpec , _rsDay :: Day -- ^ The Day this ReportSpec is generated for , _rsQuery :: Query -- ^ The generated Query for the given day , _rsQueryOpts :: [QueryOpt] -- ^ A list of QueryOpts for the given day } deriving (Show) instance Default ReportSpec where def = defreportspec defreportspec :: ReportSpec defreportspec = ReportSpec { _rsReportOpts = def , _rsDay = nulldate , _rsQuery = Any , _rsQueryOpts = [] } -- | Set the default ConversionOp. setDefaultConversionOp :: ConversionOp -> ReportSpec -> ReportSpec setDefaultConversionOp defop rspec@ReportSpec{_rsReportOpts=ropts} = rspec{_rsReportOpts=ropts{conversionop_=conversionop_ ropts <|> Just defop}} accountlistmodeopt :: RawOpts -> AccountListMode accountlistmodeopt = fromMaybe ALFlat . choiceopt parse where parse = \case "tree" -> Just ALTree "flat" -> Just ALFlat _ -> Nothing -- Get the argument of the --budget option if any, or the empty string. maybebudgetpatternopt :: RawOpts -> Maybe T.Text maybebudgetpatternopt = fmap T.pack . maybestringopt "budget" balancecalcopt :: RawOpts -> BalanceCalculation balancecalcopt = fromMaybe CalcChange . choiceopt parse where parse = \case "sum" -> Just CalcChange "valuechange" -> Just CalcValueChange "gain" -> Just CalcGain "budget" -> Just CalcBudget "count" -> Just CalcPostingsCount _ -> Nothing balanceaccumopt :: RawOpts -> BalanceAccumulation balanceaccumopt = fromMaybe PerPeriod . balanceAccumulationOverride alwaysneveropt :: String -> RawOpts -> Maybe Bool alwaysneveropt opt rawopts = case maybestringopt opt rawopts of Just "always" -> Just True Just "yes" -> Just True Just "y" -> Just True Just "never" -> Just False Just "no" -> Just False Just "n" -> Just False Just _ -> usageError "--pretty's argument should be \"yes\" or \"no\" (or y, n, always, never)" _ -> Nothing balanceAccumulationOverride :: RawOpts -> Maybe BalanceAccumulation balanceAccumulationOverride rawopts = choiceopt parse rawopts <|> reportbal where parse = \case "historical" -> Just Historical "cumulative" -> Just Cumulative "change" -> Just PerPeriod _ -> Nothing reportbal = case balancecalcopt rawopts of CalcValueChange -> Just PerPeriod _ -> Nothing layoutopt :: RawOpts -> Layout layoutopt rawopts = fromMaybe (LayoutWide Nothing) $ layout <|> column where layout = parse <$> maybestringopt "layout" rawopts column = LayoutBare <$ guard (boolopt "commodity-column" rawopts) parse opt = maybe err snd $ guard (not $ null s) *> find (isPrefixOf s . fst) checkNames where checkNames = [ ("wide", LayoutWide w) , ("tall", LayoutTall) , ("bare", LayoutBare) , ("tidy", LayoutTidy) ] -- For `--layout=elided,n`, elide to the given width (s,n) = break (==',') $ map toLower opt w = case drop 1 n of "" -> Nothing c | Just w' <- readMay c -> Just w' _ -> usageError "width in --layout=wide,WIDTH must be an integer" err = usageError "--layout's argument should be \"wide[,WIDTH]\", \"tall\", \"bare\", or \"tidy\"" -- Get the period specified by any -b/--begin, -e/--end and/or -p/--period -- options appearing in the command line. -- Its bounds are the rightmost begin date specified by a -b or -p, and -- the rightmost end date specified by a -e or -p. Cf #1011. -- Today's date is provided to help interpret any relative dates. 
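-- An added, hedged illustration of the rightmost-wins rule: with
-- "-b 2023/1/1 -p 2023q2" given in that order, the begin bound is taken
-- from -p (2023-04-01), the rightmost option supplying one, and the end
-- bound (2023-07-01) also comes from -p.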
periodFromRawOpts :: Day -> RawOpts -> Period periodFromRawOpts d rawopts = case (mlastb, mlaste) of (Nothing, Nothing) -> PeriodAll (Just b, Nothing) -> PeriodFrom b (Nothing, Just e) -> PeriodTo e (Just b, Just e) -> simplifyPeriod $ PeriodBetween b e where mlastb = case beginDatesFromRawOpts d rawopts of [] -> Nothing bs -> Just $ fromEFDay $ last bs mlaste = case endDatesFromRawOpts d rawopts of [] -> Nothing es -> Just $ fromEFDay $ last es -- Get all begin dates specified by -b/--begin or -p/--period options, in order, -- using the given date to interpret relative date expressions. beginDatesFromRawOpts :: Day -> RawOpts -> [EFDay] beginDatesFromRawOpts d = collectopts (begindatefromrawopt d) where begindatefromrawopt d' (n,v) | n == "begin" = either (\e -> usageError $ "could not parse "++n++" date: "++customErrorBundlePretty e) Just $ fixSmartDateStrEither' d' (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++customErrorBundlePretty e) id $ parsePeriodExpr d' (stripquotes $ T.pack v) of (_, DateSpan (Just b) _) -> Just b _ -> Nothing | otherwise = Nothing -- Get all end dates specified by -e/--end or -p/--period options, in order, -- using the given date to interpret relative date expressions. endDatesFromRawOpts :: Day -> RawOpts -> [EFDay] endDatesFromRawOpts d = collectopts (enddatefromrawopt d) where enddatefromrawopt d' (n,v) | n == "end" = either (\e -> usageError $ "could not parse "++n++" date: "++customErrorBundlePretty e) Just $ fixSmartDateStrEither' d' (T.pack v) | n == "period" = case either (\e -> usageError $ "could not parse period option: "++customErrorBundlePretty e) id $ parsePeriodExpr d' (stripquotes $ T.pack v) of (_, DateSpan _ (Just e)) -> Just e _ -> Nothing | otherwise = Nothing -- | Get the report interval, if any, specified by the last of -p/--period, -- -D/--daily, -W/--weekly, -M/--monthly etc. options. -- An interval from --period counts only if it is explicitly defined. intervalFromRawOpts :: RawOpts -> Interval intervalFromRawOpts = lastDef NoInterval . collectopts intervalfromrawopt where intervalfromrawopt (n,v) | n == "period" = either (\e -> usageError $ "could not parse period option: "++customErrorBundlePretty e) extractIntervalOrNothing $ parsePeriodExpr (error' "intervalFromRawOpts: did not expect to need today's date here") -- PARTIAL: should not happen; we are just getting the interval, which does not use the reference date (stripquotes $ T.pack v) | n == "daily" = Just $ Days 1 | n == "weekly" = Just $ Weeks 1 | n == "monthly" = Just $ Months 1 | n == "quarterly" = Just $ Quarters 1 | n == "yearly" = Just $ Years 1 | otherwise = Nothing -- | Extract the interval from the parsed -p/--period expression. -- Return Nothing if an interval is not explicitly defined. extractIntervalOrNothing :: (Interval, DateSpan) -> Maybe Interval extractIntervalOrNothing (NoInterval, _) = Nothing extractIntervalOrNothing (interval, _) = Just interval -- | Get any statuses to be matched, as specified by -U/--unmarked, -- -P/--pending, -C/--cleared flags. -UPC is equivalent to no flags, -- so this returns a list of 0-2 unique statuses. statusesFromRawOpts :: RawOpts -> [Status] statusesFromRawOpts = simplifyStatuses . collectopts statusfromrawopt where statusfromrawopt (n,_) | n == "unmarked" = Just Unmarked | n == "pending" = Just Pending | n == "cleared" = Just Cleared | otherwise = Nothing -- | Reduce a list of statuses to just one of each status, -- and if all statuses are present return the empty list. 
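-- An added, hedged illustration: with the three possible statuses
-- (Unmarked, Pending, Cleared), [Pending, Pending] reduces to [Pending],
-- while [Unmarked, Pending, Cleared] reduces to [], since matching all
-- statuses is the same as not filtering by status at all.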
simplifyStatuses l | length l' >= numstatuses = [] | otherwise = l' where l' = nubSort l numstatuses = length [minBound .. maxBound :: Status] -- | Add/remove this status from the status list. Used by hledger-ui. reportOptsToggleStatus s ropts@ReportOpts{statuses_=ss} | s `elem` ss = ropts{statuses_=filter (/= s) ss} | otherwise = ropts{statuses_=simplifyStatuses (s:ss)} -- | Parse the type of valuation to be performed, if any, specified by -V, -- -X/--exchange, or --value flags. If there's more than one valuation type, -- the rightmost flag wins. This will fail with a usage error if an invalid -- argument is passed to --value, or if --valuechange is called with a -- valuation type other than -V/--value=end. valuationTypeFromRawOpts :: RawOpts -> Maybe ValuationType valuationTypeFromRawOpts rawopts = case (balancecalcopt rawopts, directval) of (CalcValueChange, Nothing ) -> Just $ AtEnd Nothing -- If no valuation requested for valuechange, use AtEnd (CalcValueChange, Just (AtEnd _)) -> directval -- If AtEnd valuation requested, use it (CalcValueChange, _ ) -> usageError "--valuechange only produces sensible results with --value=end" (CalcGain, Nothing ) -> Just $ AtEnd Nothing -- If no valuation requested for gain, use AtEnd (_, _ ) -> directval -- Otherwise, use requested valuation where directval = lastMay $ collectopts valuationfromrawopt rawopts valuationfromrawopt (n,v) -- option name, value | n == "V" = Just $ AtEnd Nothing | n == "X" = Just $ AtEnd (Just $ T.pack v) | n == "value" = valueopt v | otherwise = Nothing valueopt v | t `elem` ["cost","c"] = AtEnd . Just <$> mc -- keep supporting --value=cost,COMM for now | t `elem` ["then" ,"t"] = Just $ AtThen mc | t `elem` ["end" ,"e"] = Just $ AtEnd mc | t `elem` ["now" ,"n"] = Just $ AtNow mc | otherwise = case parsedateM t of Just d -> Just $ AtDate d mc Nothing -> usageError $ "could not parse \""++t++"\" as valuation type, should be: then|end|now|t|e|n|YYYY-MM-DD" where -- parse --value's value: TYPE[,COMM] (t,c') = break (==',') v mc = case drop 1 c' of "" -> Nothing c -> Just $ T.pack c -- | Parse the type of costing to be performed, if any, specified by -B/--cost -- or --value flags. If there's more than one costing type, the rightmost flag -- wins. This will fail with a usage error if an invalid argument is passed to -- --cost or if a costing type is requested with --gain. conversionOpFromRawOpts :: RawOpts -> Maybe ConversionOp conversionOpFromRawOpts rawopts | isJust costFlag && balancecalcopt rawopts == CalcGain = usageError "--gain cannot be combined with --cost" | otherwise = costFlag where costFlag = lastMay $ collectopts conversionopfromrawopt rawopts conversionopfromrawopt (n,v) -- option name, value | n == "B" = Just ToCost | n == "value", takeWhile (/=',') v `elem` ["cost", "c"] = Just ToCost -- keep supporting --value=cost for now | otherwise = Nothing -- | Select the Transaction date accessor based on --date2. transactionDateFn :: ReportOpts -> (Transaction -> Day) transactionDateFn ReportOpts{..} = if date2_ then transactionDate2 else tdate -- | Select the Posting date accessor based on --date2. postingDateFn :: ReportOpts -> (Posting -> Day) postingDateFn ReportOpts{..} = if date2_ then postingDate2 else postingDate -- | Report which date we will report on based on --date2. whichDate :: ReportOpts -> WhichDate whichDate ReportOpts{..} = if date2_ then SecondaryDate else PrimaryDate -- | Legacy-compatible convenience aliases for accountlistmode_. 
tree_ :: ReportOpts -> Bool tree_ ReportOpts{accountlistmode_ = ALTree} = True tree_ ReportOpts{accountlistmode_ = ALFlat} = False flat_ :: ReportOpts -> Bool flat_ = not . tree_ -- depthFromOpts :: ReportOpts -> Int -- depthFromOpts opts = min (fromMaybe 99999 $ depth_ opts) (queryDepth $ queryFromOpts nulldate opts) -- | Convert a 'Journal''s amounts to cost and/or to value (see -- 'journalApplyValuationFromOpts'), and filter by the 'ReportSpec' 'Query'. -- -- We make sure to first filter by amt: and cur: terms, then value the -- 'Journal', then filter by the remaining terms. journalValueAndFilterPostings :: ReportSpec -> Journal -> Journal journalValueAndFilterPostings rspec j = journalValueAndFilterPostingsWith rspec j priceoracle where priceoracle = journalPriceOracle (infer_prices_ $ _rsReportOpts rspec) j -- | Like 'journalValueAndFilterPostings', but takes a 'PriceOracle' as an argument. journalValueAndFilterPostingsWith :: ReportSpec -> Journal -> PriceOracle -> Journal journalValueAndFilterPostingsWith rspec@ReportSpec{_rsQuery=q, _rsReportOpts=ropts} j = -- Filter by the remainder of the query filterJournal reportq -- Apply valuation and costing . journalApplyValuationFromOptsWith rspec -- Filter by amount and currency, so it matches pre-valuation/costing (if queryIsNull amtsymq then j else filterJournalAmounts amtsymq j) where -- with -r, replace each posting with its sibling postings filterJournal = if related_ ropts then filterJournalRelatedPostings else filterJournalPostings amtsymq = dbg3 "amtsymq" $ filterQuery queryIsAmtOrSym q reportq = dbg3 "reportq" $ filterQuery (not . queryIsAmtOrSym) q queryIsAmtOrSym = liftA2 (||) queryIsAmt queryIsSym -- | Convert this journal's postings' amounts to cost and/or to value, if specified -- by options (-B/--cost/-V/-X/--value etc.). Strip prices if not needed. This -- should be the main stop for performing costing and valuation. The exception is -- whenever you need to perform valuation _after_ summing up amounts, as in a -- historical balance report with --value=end. valuationAfterSum will check for this -- condition. journalApplyValuationFromOpts :: ReportSpec -> Journal -> Journal journalApplyValuationFromOpts rspec j = journalApplyValuationFromOptsWith rspec j priceoracle where priceoracle = journalPriceOracle (infer_prices_ $ _rsReportOpts rspec) j -- | Like journalApplyValuationFromOpts, but takes PriceOracle as an argument. journalApplyValuationFromOptsWith :: ReportSpec -> Journal -> PriceOracle -> Journal journalApplyValuationFromOptsWith rspec@ReportSpec{_rsReportOpts=ropts} j priceoracle = case balancecalc_ ropts of CalcGain -> journalMapPostings (\p -> postingTransformAmount (gain p) p) j _ -> journalMapPostings (\p -> postingTransformAmount (valuation p) p) $ costing j where valuation p = maybe id (mixedAmountApplyValuation priceoracle styles (postingperiodend p) (_rsDay rspec) (postingDate p)) (value_ ropts) gain p = maybe id (mixedAmountApplyGain priceoracle styles (postingperiodend p) (_rsDay rspec) (postingDate p)) (value_ ropts) costing = journalToCost (fromMaybe NoConversionOp $ conversionop_ ropts) -- Find the end of the period containing this posting postingperiodend = addDays (-1) . fromMaybe err . mPeriodEnd . postingDateOrDate2 (whichDate ropts) mPeriodEnd = case interval_ ropts of NoInterval -> const . spanEnd . fst $ reportSpan j rspec _ -> spanEnd <=< latestSpanContaining (historical : spans) historical = DateSpan Nothing $ (fmap Exact . 
spanStart) =<< headMay spans spans = snd $ reportSpanBothDates j rspec styles = journalCommodityStyles j err = error "journalApplyValuationFromOpts: expected all spans to have an end date" -- | Select the Account valuation functions required for performing valuation after summing -- amounts. Used in MultiBalanceReport to value historical and similar reports. mixedAmountApplyValuationAfterSumFromOptsWith :: ReportOpts -> Journal -> PriceOracle -> (DateSpan -> MixedAmount -> MixedAmount) mixedAmountApplyValuationAfterSumFromOptsWith ropts j priceoracle = case valuationAfterSum ropts of Just mc -> case balancecalc_ ropts of CalcGain -> gain mc _ -> \spn -> valuation mc spn . costing Nothing -> const id where valuation mc spn = mixedAmountValueAtDate priceoracle styles mc (maybe err (addDays (-1)) $ spanEnd spn) gain mc spn = mixedAmountGainAtDate priceoracle styles mc (maybe err (addDays (-1)) $ spanEnd spn) costing = case fromMaybe NoConversionOp $ conversionop_ ropts of NoConversionOp -> id ToCost -> styleMixedAmount styles . mixedAmountCost styles = journalCommodityStyles j err = error "mixedAmountApplyValuationAfterSumFromOptsWith: expected all spans to have an end date" -- | If the ReportOpts specify that we are performing valuation after summing amounts, -- return Just of the commodity symbol we're converting to, Just Nothing for the default, -- and otherwise return Nothing. -- Used for example with historical reports with --value=end. valuationAfterSum :: ReportOpts -> Maybe (Maybe CommoditySymbol) valuationAfterSum ropts = case value_ ropts of Just (AtEnd mc) | valueAfterSum -> Just mc _ -> Nothing where valueAfterSum = balancecalc_ ropts == CalcValueChange || balancecalc_ ropts == CalcGain || balanceaccum_ ropts /= PerPeriod -- | Convert report options to a query, ignoring any non-flag command line arguments. queryFromFlags :: ReportOpts -> Query queryFromFlags ReportOpts{..} = simplifyQuery $ And flagsq where flagsq = consIf Real real_ . consJust Depth depth_ $ [ (if date2_ then Date2 else Date) $ periodAsDateSpan period_ , Or $ map StatusQ statuses_ ] consIf f b = if b then (f True:) else id consJust f = maybe id ((:) . f) -- Report dates. -- | The effective report span is the start and end dates specified by -- options or queries, or otherwise the earliest and latest transaction or -- posting dates in the journal. If no dates are specified by options/queries -- and the journal is empty, returns the null date span. -- Also return the intervals if they are requested. reportSpan :: Journal -> ReportSpec -> (DateSpan, [DateSpan]) reportSpan = reportSpanHelper False -- | Like reportSpan, but uses both primary and secondary dates when calculating -- the span. reportSpanBothDates :: Journal -> ReportSpec -> (DateSpan, [DateSpan]) reportSpanBothDates = reportSpanHelper True -- | A helper for reportSpan, which takes a Bool indicating whether to use both -- primary and secondary dates. reportSpanHelper :: Bool -> Journal -> ReportSpec -> (DateSpan, [DateSpan]) reportSpanHelper bothdates j ReportSpec{_rsQuery=query, _rsReportOpts=ropts} = (reportspan, intervalspans) where -- The date span specified by -b/-e/-p options and query args if any. 
requestedspan = dbg3 "requestedspan" $ if bothdates then queryDateSpan' query else queryDateSpan (date2_ ropts) query -- If we are requesting period-end valuation, the journal date span should -- include price directives after the last transaction journalspan = dbg3 "journalspan" $ if bothdates then journalDateSpanBothDates j else journalDateSpan (date2_ ropts) j pricespan = dbg3 "pricespan" . DateSpan Nothing $ case value_ ropts of Just (AtEnd _) -> fmap (Exact . addDays 1) . maximumMay . map pddate $ jpricedirectives j _ -> Nothing -- If the requested span is open-ended, close it using the journal's start and end dates. -- This can still be the null (open) span if the journal is empty. requestedspan' = dbg3 "requestedspan'" $ requestedspan `spanDefaultsFrom` (journalspan `spanUnion` pricespan) -- The list of interval spans enclosing the requested span. -- This list can be empty if the journal was empty, -- or if hledger-ui has added its special date:-tomorrow to the query -- and all txns are in the future. intervalspans = dbg3 "intervalspans" $ splitSpan adjust (interval_ ropts) requestedspan' where -- When calculating report periods, we will adjust the start date back to the nearest interval boundary -- unless a start date was specified explicitly. adjust = isNothing $ spanStart requestedspan -- The requested span enlarged to enclose a whole number of intervals. -- This can be the null span if there were no intervals. reportspan = dbg3 "reportspan" $ DateSpan (fmap Exact . spanStart =<< headMay intervalspans) (fmap Exact . spanEnd =<< lastMay intervalspans) reportStartDate :: Journal -> ReportSpec -> Maybe Day reportStartDate j = spanStart . fst . reportSpan j reportEndDate :: Journal -> ReportSpec -> Maybe Day reportEndDate j = spanEnd . fst . reportSpan j -- Some pure alternatives to the above. XXX review/clean up -- Get the report's start date. -- If no report period is specified, will be Nothing. reportPeriodStart :: ReportSpec -> Maybe Day reportPeriodStart = queryStartDate False . _rsQuery -- Get the report's start date, or if no report period is specified, -- the journal's start date (the earliest posting date). If there's no -- report period and nothing in the journal, will be Nothing. reportPeriodOrJournalStart :: ReportSpec -> Journal -> Maybe Day reportPeriodOrJournalStart rspec j = reportPeriodStart rspec <|> journalStartDate False j -- Get the last day of the overall report period. -- This the inclusive end date (one day before the -- more commonly used, exclusive, report end date). -- If no report period is specified, will be Nothing. reportPeriodLastDay :: ReportSpec -> Maybe Day reportPeriodLastDay = fmap (addDays (-1)) . queryEndDate False . _rsQuery -- Get the last day of the overall report period, or if no report -- period is specified, the last day of the journal (ie the latest -- posting date). If we're doing period-end valuation, include price -- directive dates. If there's no report period and nothing in the -- journal, will be Nothing. reportPeriodOrJournalLastDay :: ReportSpec -> Journal -> Maybe Day reportPeriodOrJournalLastDay rspec j = reportPeriodLastDay rspec <|> journalOrPriceEnd where journalOrPriceEnd = case value_ $ _rsReportOpts rspec of Just (AtEnd _) -> max (journalLastDay False j) lastPriceDirective _ -> journalLastDay False j lastPriceDirective = fmap (addDays 1) . maximumMay . 
map pddate $ jpricedirectives j -- | Make a name for the given period in a multiperiod report, given -- the type of balance being reported and the full set of report -- periods. This will be used as a column heading (or row heading, in -- a register summary report). We try to pick a useful name as follows: -- -- - ending-balance reports: the period's end date -- -- - balance change reports where the periods are months and all in the same year: -- the short month name in the current locale -- -- - all other balance change reports: a description of the datespan, -- abbreviated to compact form if possible (see showDateSpan). reportPeriodName :: BalanceAccumulation -> [DateSpan] -> DateSpan -> T.Text reportPeriodName balanceaccumulation spans = case balanceaccumulation of PerPeriod -> if multiyear then showDateSpan else showDateSpanMonthAbbrev where multiyear = (>1) $ length $ nubSort $ map spanStartYear spans _ -> maybe "" (showDate . prevday) . spanEnd -- lenses -- Reportable functors are so that we can create special lenses which can fail -- and report on their failure. class Functor f => Reportable f e where report :: a -> f (Either e a) -> f a instance Reportable (Const r) e where report _ (Const x) = Const x instance Reportable Identity e where report a (Identity i) = Identity $ fromRight a i instance Reportable Maybe e where report _ = (eitherToMaybe =<<) instance (e ~ a) => Reportable (Either a) e where report _ = join -- | Apply a function over a lens, but report on failure. overEither :: ((a -> Either e b) -> s -> Either e t) -> (a -> b) -> s -> Either e t overEither l f = l (pure . f) -- | Set a field using a lens, but report on failure. setEither :: ((a -> Either e b) -> s -> Either e t) -> b -> s -> Either e t setEither l = overEither l . const type ReportableLens' s a = forall f. Reportable f String => (a -> f a) -> s -> f s -- | Lenses for ReportOpts. -- Implement HasReportOptsNoUpdate, the basic lenses for ReportOpts. makeHledgerClassyLenses ''ReportOpts makeHledgerClassyLenses ''ReportSpec -- | Special lenses for ReportOpts which also update the Query and QueryOpts in ReportSpec. -- Note that these are not true lenses, as they have a further restriction on -- the functor. This will work as a normal lens for all common uses, but since they -- don't obey the lens laws for some fancy cases, they may fail in some exotic circumstances. -- -- Note that setEither/overEither should only be necessary with -- querystring and reportOpts: the other lenses should never fail. 
-- -- === Examples: -- >>> import Lens.Micro (set) -- >>> _rsQuery <$> setEither querystring ["assets"] defreportspec -- Right (Acct (RegexpCI "assets")) -- >>> _rsQuery <$> setEither querystring ["(assets"] defreportspec -- Left "This regular expression is malformed, please correct it:\n(assets" -- >>> _rsQuery $ set querystring ["assets"] defreportspec -- Acct (RegexpCI "assets") -- >>> _rsQuery $ set querystring ["(assets"] defreportspec -- *** Exception: Error: Updating ReportSpec failed: try using overEither instead of over or setEither instead of set -- >>> _rsQuery $ set period (MonthPeriod 2021 08) defreportspec -- Date DateSpan 2021-08 class HasReportOptsNoUpdate a => HasReportOpts a where reportOpts :: ReportableLens' a ReportOpts reportOpts = reportOptsNoUpdate {-# INLINE reportOpts #-} -- XXX these names are a bit clashy period :: ReportableLens' a Period period = reportOpts.periodNoUpdate {-# INLINE period #-} statuses :: ReportableLens' a [Status] statuses = reportOpts.statusesNoUpdate {-# INLINE statuses #-} depth :: ReportableLens' a (Maybe Int) depth = reportOpts.depthNoUpdate {-# INLINE depth #-} date2 :: ReportableLens' a Bool date2 = reportOpts.date2NoUpdate {-# INLINE date2 #-} real :: ReportableLens' a Bool real = reportOpts.realNoUpdate {-# INLINE real #-} querystring :: ReportableLens' a [T.Text] querystring = reportOpts.querystringNoUpdate {-# INLINE querystring #-} instance HasReportOpts ReportOpts instance HasReportOptsNoUpdate ReportSpec where reportOptsNoUpdate = rsReportOpts instance HasReportOpts ReportSpec where reportOpts f rspec = report (error' "Updating ReportSpec failed: try using overEither instead of over or setEither instead of set") $ -- PARTIAL: reportOptsToSpec (_rsDay rspec) <$> f (_rsReportOpts rspec) {-# INLINE reportOpts #-} -- | Generate a ReportSpec from a set of ReportOpts on a given day. reportOptsToSpec :: Day -> ReportOpts -> Either String ReportSpec reportOptsToSpec day ropts = do (argsquery, queryopts) <- parseQueryList day $ querystring_ ropts return ReportSpec { _rsReportOpts = ropts , _rsDay = day , _rsQuery = simplifyQuery $ And [queryFromFlags ropts, argsquery] , _rsQueryOpts = queryopts } -- | Update the ReportOpts and the fields derived from it in a ReportSpec, -- or return an error message if there is a problem such as missing or -- unparseable options data. This is the safe way to change a ReportSpec, -- ensuring that all fields (_rsQuery, _rsReportOpts, querystring_, etc.) are in sync. updateReportSpec :: ReportOpts -> ReportSpec -> Either String ReportSpec updateReportSpec = setEither reportOpts -- | Like updateReportSpec, but takes a ReportOpts-modifying function. updateReportSpecWith :: (ReportOpts -> ReportOpts) -> ReportSpec -> Either String ReportSpec updateReportSpecWith = overEither reportOpts -- | Generate a ReportSpec from RawOpts and a provided day, or return an error -- string if there are regular expression errors. rawOptsToReportSpec :: Day -> RawOpts -> Either String ReportSpec rawOptsToReportSpec day = reportOptsToSpec day . rawOptsToReportOpts day hledger-lib-1.30/Hledger/Reports/ReportTypes.hs0000644000000000000000000002033414434445206017655 0ustar0000000000000000{- | New common report types, used by the BudgetReport for now, perhaps all reports later. -} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE DeriveGeneric #-} module Hledger.Reports.ReportTypes ( PeriodicReport(..) , PeriodicReportRow(..) 
, Percentage , Change , Balance , Total , Average , periodicReportSpan , prMapName , prMapMaybeName , CompoundPeriodicReport(..) , CBCSubreportSpec(..) , DisplayName(..) , flatDisplayName , treeDisplayName , prrFullName , prrDisplayName , prrDepth , prrAdd ) where import Data.Aeson (ToJSON(..)) import Data.Bifunctor (Bifunctor(..)) import Data.Decimal (Decimal) import Data.Maybe (mapMaybe) import Data.Text (Text) import GHC.Generics (Generic) import Hledger.Data import Hledger.Query (Query) import Hledger.Reports.ReportOptions (ReportOpts) type Percentage = Decimal type Change = MixedAmount -- ^ A change in balance during a certain period. type Balance = MixedAmount -- ^ An ending balance as of some date. type Total = MixedAmount -- ^ The sum of 'Change's in a report or a report row. Does not make sense for 'Balance's. type Average = MixedAmount -- ^ The average of 'Change's or 'Balance's in a report or report row. -- | A periodic report is a generic tabular report, where each row corresponds -- to some label (usually an account name) and each column to a date period. -- The column periods are usually consecutive subperiods formed by splitting -- the overall report period by some report interval (daily, weekly, etc.). -- It has: -- -- 1. a list of each column's period (date span) -- -- 2. a list of rows, each containing: -- -- * an account label -- -- * the account's depth -- -- * A list of amounts, one for each column. Depending on the value type, -- these can represent balance changes, ending balances, budget -- performance, etc. (for example, see 'BalanceAccumulation' and -- "Hledger.Cli.Commands.Balance"). -- -- * the total of the row's amounts for a periodic report, -- or zero for cumulative/historical reports (since summing -- end balances generally doesn't make sense). -- -- * the average of the row's amounts -- -- 3. the column totals, and the overall grand total (or zero for -- cumulative/historical reports) and grand average. data PeriodicReport a b = PeriodicReport { prDates :: [DateSpan] -- The subperiods formed by splitting the overall -- report period by the report interval. For -- ending-balance reports, only the end date is -- significant. Usually displayed as report columns. , prRows :: [PeriodicReportRow a b] -- One row per account in the report. , prTotals :: PeriodicReportRow () b -- The grand totals row. } deriving (Show, Functor, Generic, ToJSON) instance Bifunctor PeriodicReport where bimap f g pr = pr{prRows = map (bimap f g) $ prRows pr, prTotals = g <$> prTotals pr} data PeriodicReportRow a b = PeriodicReportRow { prrName :: a -- An account name. , prrAmounts :: [b] -- The data value for each subperiod. , prrTotal :: b -- The total of this row's values. , prrAverage :: b -- The average of this row's values. } deriving (Show, Functor, Generic, ToJSON) instance Bifunctor PeriodicReportRow where first f prr = prr{prrName = f $ prrName prr} second = fmap instance Semigroup b => Semigroup (PeriodicReportRow a b) where (<>) = prrAdd -- | Add two 'PeriodicReportRows', preserving the name of the first. prrAdd :: Semigroup b => PeriodicReportRow a b -> PeriodicReportRow a b -> PeriodicReportRow a b prrAdd (PeriodicReportRow n1 amts1 t1 a1) (PeriodicReportRow _ amts2 t2 a2) = PeriodicReportRow n1 (zipWithPadded (<>) amts1 amts2) (t1 <> t2) (a1 <> a2) -- | Version of 'zipWith' which will not end on the shortest list, but will copy the rest of the longer list. 
zipWithPadded :: (a -> a -> a) -> [a] -> [a] -> [a] zipWithPadded f (a:as) (b:bs) = f a b : zipWithPadded f as bs zipWithPadded _ as [] = as zipWithPadded _ [] bs = bs -- | Figure out the overall date span of a PeriodicReport periodicReportSpan :: PeriodicReport a b -> DateSpan periodicReportSpan (PeriodicReport [] _ _) = DateSpan Nothing Nothing periodicReportSpan (PeriodicReport colspans _ _) = DateSpan (fmap Exact . spanStart $ head colspans) (fmap Exact . spanEnd $ last colspans) -- | Map a function over the row names. prMapName :: (a -> b) -> PeriodicReport a c -> PeriodicReport b c prMapName f report = report{prRows = map (prrMapName f) $ prRows report} -- | Map a function over the row names, possibly discarding some. prMapMaybeName :: (a -> Maybe b) -> PeriodicReport a c -> PeriodicReport b c prMapMaybeName f report = report{prRows = mapMaybe (prrMapMaybeName f) $ prRows report} -- | Map a function over the row names of the PeriodicReportRow. prrMapName :: (a -> b) -> PeriodicReportRow a c -> PeriodicReportRow b c prrMapName f row = row{prrName = f $ prrName row} -- | Map maybe a function over the row names of the PeriodicReportRow. prrMapMaybeName :: (a -> Maybe b) -> PeriodicReportRow a c -> Maybe (PeriodicReportRow b c) prrMapMaybeName f row = case f $ prrName row of Nothing -> Nothing Just a -> Just row{prrName = a} -- | A compound balance report has: -- -- * an overall title -- -- * the period (date span) of each column -- -- * one or more named, normal-positive multi balance reports, -- with columns corresponding to the above, and a flag indicating -- whether they increased or decreased the overall totals -- -- * a list of overall totals for each column, and their grand total and average -- -- It is used in compound balance report commands like balancesheet, -- cashflow and incomestatement. data CompoundPeriodicReport a b = CompoundPeriodicReport { cbrTitle :: Text , cbrDates :: [DateSpan] , cbrSubreports :: [(Text, PeriodicReport a b, Bool)] , cbrTotals :: PeriodicReportRow () b } deriving (Show, Functor, Generic, ToJSON) -- | Description of one subreport within a compound balance report. -- Part of a "CompoundBalanceCommandSpec", but also used in hledger-lib. data CBCSubreportSpec a = CBCSubreportSpec { cbcsubreporttitle :: Text -- ^ The title to use for the subreport , cbcsubreportquery :: Query -- ^ The Query to use for the subreport , cbcsubreportoptions :: ReportOpts -> ReportOpts -- ^ A function to transform the ReportOpts used to produce the subreport , cbcsubreporttransform :: PeriodicReport DisplayName MixedAmount -> PeriodicReport a MixedAmount -- ^ A function to transform the result of the subreport , cbcsubreportincreasestotal :: Bool -- ^ Whether the subreport and overall report total are of the same sign (e.g. Assets are normally -- positive in a balance sheet report, as is the overall total. Liabilities are normally of the -- opposite sign.) } -- | A full name, display name, and depth for an account. data DisplayName = DisplayName { displayFull :: AccountName , displayName :: AccountName , displayDepth :: Int } deriving (Show, Eq, Ord) instance ToJSON DisplayName where toJSON = toJSON . displayFull toEncoding = toEncoding . 
displayFull -- | Construct a flat display name, where the full name is also displayed at -- depth 1 flatDisplayName :: AccountName -> DisplayName flatDisplayName a = DisplayName a a 1 -- | Construct a tree display name, where only the leaf is displayed at its -- given depth treeDisplayName :: AccountName -> DisplayName treeDisplayName a = DisplayName a (accountLeafName a) (accountNameLevel a) -- | Get the full, canonical, name of a PeriodicReportRow tagged by a -- DisplayName. prrFullName :: PeriodicReportRow DisplayName a -> AccountName prrFullName = displayFull . prrName -- | Get the display name of a PeriodicReportRow tagged by a DisplayName. prrDisplayName :: PeriodicReportRow DisplayName a -> AccountName prrDisplayName = displayName . prrName -- | Get the display depth of a PeriodicReportRow tagged by a DisplayName. prrDepth :: PeriodicReportRow DisplayName a -> Int prrDepth = displayDepth . prrName hledger-lib-1.30/Hledger/Reports/AccountTransactionsReport.hs0000644000000000000000000003300614434445206022536 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-| An account-centric transactions report. -} module Hledger.Reports.AccountTransactionsReport ( AccountTransactionsReport, AccountTransactionsReportItem, accountTransactionsReport, accountTransactionsReportItems, transactionRegisterDate, triOrigTransaction, triDate, triAmount, triBalance, triCommodityAmount, triCommodityBalance, accountTransactionsReportByCommodity, tests_AccountTransactionsReport ) where import Data.List (mapAccumR, nub, partition, sortBy) import Data.List.Extra (nubSort) import Data.Maybe (catMaybes) import Data.Ord (Down(..), comparing) import Data.Text (Text) import qualified Data.Text as T import Data.Time.Calendar (Day) import Hledger.Data import Hledger.Query import Hledger.Reports.ReportOptions import Hledger.Utils -- | An account transactions report represents transactions affecting -- a particular account (or possibly several accounts, but we don't -- use that). It is used eg by hledger-ui's and hledger-web's register -- view, and hledger's aregister report, where we want to show one row -- per transaction, in the context of the current account. Report -- items consist of: -- -- - the transaction, unmodified -- -- - the transaction as seen in the context of the current account and query, -- which means: -- -- - the transaction date is set to the "transaction context date": -- the earliest of the transaction date and any other posting dates -- of postings to the current account (matched by the report query). -- -- - the transaction's postings are filtered, excluding any which are not -- matched by the report query -- -- - a text description of the other account(s) posted to/from -- -- - a flag indicating whether there's more than one other account involved -- -- - the total increase/decrease to the current account -- -- - the report transactions' running total after this transaction; -- or if historical balance is requested (-H), the historical running total. -- The historical running total includes transactions from before the -- report start date if one is specified, filtered by the report query. -- The historical running total may or may not be the account's historical -- running balance, depending on the report query. -- -- Items are sorted by transaction register date (the earliest date the transaction -- posts to the current account), most recent first. -- Reporting intervals are currently ignored. 
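-- An added, hedged usage sketch (not from the original docs): callers can
-- project the fields they need with the accessors defined below, e.g.
--
-- @
-- rows = [ (triDate i, triAmount i, triBalance i) | i <- report ]
-- @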
-- type AccountTransactionsReport = [AccountTransactionsReportItem] -- line items, one per transaction type AccountTransactionsReportItem = ( Transaction -- the transaction, unmodified ,Transaction -- the transaction, as seen from the current account ,Bool -- is this a split (more than one posting to other accounts) ? ,Text -- a display string describing the other account(s), if any ,MixedAmount -- the amount posted to the current account(s) (or total amount posted) ,MixedAmount -- the register's running total or the current account(s)'s historical balance, after this transaction ) triOrigTransaction (torig,_,_,_,_,_) = torig triDate (_,tacct,_,_,_,_) = tdate tacct triAmount (_,_,_,_,a,_) = a triBalance (_,_,_,_,_,a) = a triCommodityAmount c = filterMixedAmountByCommodity c . triAmount triCommodityBalance c = filterMixedAmountByCommodity c . triBalance accountTransactionsReport :: ReportSpec -> Journal -> Query -> AccountTransactionsReport accountTransactionsReport rspec@ReportSpec{_rsReportOpts=ropts} j thisacctq = items where -- A depth limit should not affect the account transactions report; it should show all transactions in/below this account. -- Queries on currency or amount are also ignored at this stage; they are handled earlier, before valuation. reportq = simplifyQuery $ And [aregisterq, periodq] where aregisterq = filterQuery (not . queryIsCurOrAmt) . filterQuery (not . queryIsDepth) $ _rsQuery rspec periodq = Date . periodAsDateSpan $ period_ ropts amtq = filterQuery queryIsCurOrAmt $ _rsQuery rspec queryIsCurOrAmt q = queryIsSym q || queryIsAmt q wd = whichDate ropts -- Note that within this function, we are only allowed limited -- transformation of the transaction postings: this is due to the need to -- pass the original transactions into accountTransactionsReportItem. -- Generally, we either include a transaction in full, or not at all. -- Do some limited filtering and valuing of the journal's transactions: -- - filter them by the account query if any, -- - discard amounts not matched by the currency and amount query if any, -- - then apply valuation if any. -- Additional reportq filtering, such as date filtering, happens down in -- accountTransactionsReportItem, which discards transactions with no matched postings. acctJournal = -- With most calls we will not require transaction prices past this point, and can get a big -- speed improvement by stripping them early. In some cases, such as in hledger-ui, we still -- want to keep prices around, so we can toggle between cost and no cost quickly. We can use -- the show_costs_ flag to be efficient when we can, and detailed when we have to. (if show_costs_ ropts then id else journalMapPostingAmounts mixedAmountStripPrices) . traceOrLogAtWith 5 (("ts3:\n"++).pshowTransactions.jtxns) -- maybe convert these transactions to cost or value . journalApplyValuationFromOpts rspec . traceOrLogAtWith 5 (("ts2:\n"++).pshowTransactions.jtxns) -- apply any cur:SYM filters in reportq . (if queryIsNull amtq then id else filterJournalAmounts amtq) -- only consider transactions which match thisacctq (possibly excluding postings -- which are not real or have the wrong status) . traceOrLogAt 3 ("thisacctq: "++show thisacctq) $ traceOrLogAtWith 5 (("ts1:\n"++).pshowTransactions.jtxns) j{jtxns = filter (matchesTransaction thisacctq . relevantPostings) $ jtxns j} where relevantPostings | queryIsNull realq && queryIsNull statusq = id | otherwise = filterTransactionPostings . 
simplifyQuery $ And [realq, statusq] realq = filterQuery queryIsReal reportq statusq = filterQuery queryIsStatus reportq startbal | balanceaccum_ ropts == Historical = sumPostings priorps | otherwise = nullmixedamt where priorps = dbg5 "priorps" . journalPostings $ filterJournalPostings priorq acctJournal priorq = dbg5 "priorq" $ And [thisacctq, tostartdateq, datelessreportq] tostartdateq = case mstartdate of Just _ -> Date (DateSpan Nothing (Exact <$> mstartdate)) Nothing -> None -- no start date specified, there are no prior postings mstartdate = queryStartDate (date2_ ropts) reportq datelessreportq = filterQuery (not . queryIsDateOrDate2) reportq items = accountTransactionsReportItems reportq thisacctq startbal maNegate (journalAccountType j) -- sort by the transaction's register date, then index, for accurate starting balance . traceAtWith 5 (("ts4:\n"++).pshowTransactions.map snd) . sortBy (comparing (Down . fst) <> comparing (Down . tindex . snd)) . map (\t -> (transactionRegisterDate wd reportq thisacctq t, t)) $ jtxns acctJournal pshowTransactions :: [Transaction] -> String pshowTransactions = pshow . map (\t -> unwords [show $ tdate t, T.unpack $ tdescription t]) -- | Generate transactions report items from a list of transactions, -- using the provided user-specified report query, a query specifying -- which account to use as the focus, a starting balance, and a sign-setting -- function. -- Each transaction is accompanied by the date that should be shown for it -- in the report. This is not necessarily the transaction date - see -- transactionRegisterDate. accountTransactionsReportItems :: Query -> Query -> MixedAmount -> (MixedAmount -> MixedAmount) -> (AccountName -> Maybe AccountType) -> [(Day, Transaction)] -> [AccountTransactionsReportItem] accountTransactionsReportItems reportq thisacctq bal signfn accttypefn = catMaybes . snd . mapAccumR (accountTransactionsReportItem reportq thisacctq signfn accttypefn) bal accountTransactionsReportItem :: Query -> Query -> (MixedAmount -> MixedAmount) -> (AccountName -> Maybe AccountType) -> MixedAmount -> (Day, Transaction) -> (MixedAmount, Maybe AccountTransactionsReportItem) accountTransactionsReportItem reportq thisacctq signfn accttypefn bal (d, t) -- 201407: I've lost my grip on this, let's just hope for the best -- 201606: we now calculate change and balance from filtered postings, check this still works well for all callers XXX | null reportps = (bal, Nothing) -- no matched postings in this transaction, skip it | otherwise = (bal', Just (t, tacct{tdate=d}, numotheraccts > 1, otheracctstr, amt, bal')) where tacct@Transaction{tpostings=reportps} = filterTransactionPostingsExtra accttypefn reportq t -- TODO needs to consider --date2, #1731 (thisacctps, otheracctps) = partition (matchesPosting thisacctq) reportps numotheraccts = length $ nub $ map paccount otheracctps otheracctstr | thisacctq == None = summarisePostingAccounts reportps -- no current account ? summarise all matched postings | numotheraccts == 0 = summarisePostingAccounts thisacctps -- only postings to current account ? summarise those | otherwise = summarisePostingAccounts otheracctps -- summarise matched postings to other account(s) -- 202302: Impact of t on thisacct - normally the sum of thisacctps, -- but if they are null it probably means reportq is an account filter -- and we should sum otheracctps instead. -- This fixes hledger areg ACCT ACCT2 (#2007), hopefully it's correct in general. 
amt | null thisacctps = signfn $ sumPostings otheracctps | otherwise = signfn . maNegate $ sumPostings thisacctps bal' = bal `maPlus` amt -- TODO needs checking, cf #1731 -- | What date should be shown for a transaction in an account register report ? -- This will be in context of a particular account (the "this account" query) -- and any additional report query. It could be: -- -- - if postings are matched by both thisacctq and reportq, the earliest of those -- matched postings' dates (or their secondary dates if --date2 was used) -- -- - the transaction date, or its secondary date if --date2 was used. -- transactionRegisterDate :: WhichDate -> Query -> Query -> Transaction -> Day transactionRegisterDate wd reportq thisacctq t | not $ null thisacctps = minimum $ map (postingDateOrDate2 wd) thisacctps | otherwise = transactionDateOrDate2 wd t where reportps = tpostings $ filterTransactionPostings reportq t thisacctps = filter (matchesPosting thisacctq) reportps -- -- | Generate a short readable summary of some postings, like -- -- "from (negatives) to (positives)". -- summarisePostings :: [Posting] -> String -- summarisePostings ps = -- case (summarisePostingAccounts froms, summarisePostingAccounts tos) of -- ("",t) -> "to "++t -- (f,"") -> "from "++f -- (f,t) -> "from "++f++" to "++t -- where -- (froms,tos) = partition (fromMaybe False . isNegativeMixedAmount . pamount) ps -- | Generate a simplified summary of some postings' accounts. -- To reduce noise, if there are both real and virtual postings, show only the real ones. summarisePostingAccounts :: [Posting] -> Text summarisePostingAccounts ps = T.intercalate ", " . map accountSummarisedName . nub $ map paccount displayps where realps = filter isReal ps displayps | null realps = ps | otherwise = realps -- | Split an account transactions report whose items may involve several commodities, -- into one or more single-commodity account transactions reports. accountTransactionsReportByCommodity :: AccountTransactionsReport -> [(CommoditySymbol, AccountTransactionsReport)] accountTransactionsReportByCommodity tr = [(c, filterAccountTransactionsReportByCommodity c tr) | c <- commodities tr] where commodities = nubSort . map acommodity . concatMap (amounts . triAmount) -- | Remove account transaction report items and item amount (and running -- balance amount) components that don't involve the specified -- commodity. Other item fields such as the transaction are left unchanged. filterAccountTransactionsReportByCommodity :: CommoditySymbol -> AccountTransactionsReport -> AccountTransactionsReport filterAccountTransactionsReportByCommodity comm = fixTransactionsReportItemBalances . concatMap (filterTransactionsReportItemByCommodity comm) where filterTransactionsReportItemByCommodity c (t,t2,s,o,a,bal) | c `elem` cs = [item'] | otherwise = [] where cs = map acommodity $ amounts a item' = (t,t2,s,o,a',bal) a' = filterMixedAmountByCommodity c a fixTransactionsReportItemBalances [] = [] fixTransactionsReportItemBalances [i] = [i] fixTransactionsReportItemBalances items = reverse $ i:(go startbal is) where i:is = reverse items startbal = filterMixedAmountByCommodity comm $ triBalance i go _ [] = [] go bal ((t,t2,s,o,amt,_):is') = (t,t2,s,o,amt,bal'):go bal' is' where bal' = bal `maPlus` amt -- tests tests_AccountTransactionsReport = testGroup "AccountTransactionsReport" [ ] hledger-lib-1.30/Hledger/Reports/BalanceReport.hs0000644000000000000000000002726514434445206020110 0ustar0000000000000000{-| Balance report, used by the balance command. 
-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} module Hledger.Reports.BalanceReport ( BalanceReport, BalanceReportItem, balanceReport, flatShowsExclusiveBalance, -- * Tests tests_BalanceReport ) where import Data.Time.Calendar import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.MultiBalanceReport (multiBalanceReport) import Hledger.Reports.ReportOptions import Hledger.Reports.ReportTypes -- | A simple balance report. It has: -- -- 1. a list of items, one per account, each containing: -- -- * the full account name -- -- * the Ledger-style elided short account name -- (the leaf account name, prefixed by any boring parents immediately above); -- or with --flat, the full account name again -- -- * the number of indentation steps for rendering a Ledger-style account tree, -- taking into account elided boring parents, --no-elide and --flat -- -- * an amount -- -- 2. the total of all amounts -- type BalanceReport = ([BalanceReportItem], MixedAmount) type BalanceReportItem = (AccountName, AccountName, Int, MixedAmount) -- | When true (the default), this makes balance --flat reports and their implementation clearer. -- Single/multi-col balance reports currently aren't all correct if this is false. flatShowsExclusiveBalance = True -- | Enabling this makes balance --flat --empty also show parent accounts without postings, -- in addition to those with postings and a zero balance. Disabling it shows only the latter. -- No longer supported, but leave this here for a bit. -- flatShowsPostinglessAccounts = True -- | Generate a simple balance report, containing the matched accounts and -- their balances (change of balance) during the specified period. -- If the normalbalance_ option is set, it adjusts the sorting and sign of -- amounts (see ReportOpts and CompoundBalanceCommand). 
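--
-- A minimal usage sketch (illustrative only, using this library's test helpers):
--
-- @
-- let (rows, total) = balanceReport defreportspec samplejournal
-- in  (length rows, total)
-- @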
balanceReport :: ReportSpec -> Journal -> BalanceReport balanceReport rspec j = (rows, total) where report = multiBalanceReport rspec j rows = [( prrFullName row , prrDisplayName row , prrDepth row - 1 -- BalanceReport uses 0-based account depths , prrTotal row ) | row <- prRows report] total = prrTotal $ prTotals report -- tests Right samplejournal2 = journalBalanceTransactions defbalancingopts nulljournal{ jtxns = [ txnTieKnot Transaction{ tindex=0, tsourcepos=nullsourcepos, tdate=fromGregorian 2008 01 01, tdate2=Just $ fromGregorian 2009 01 01, tstatus=Unmarked, tcode="", tdescription="income", tcomment="", ttags=[], tpostings= [posting {paccount="assets:bank:checking", pamount=mixedAmount (usd 1)} ,posting {paccount="income:salary", pamount=missingmixedamt} ], tprecedingcomment="" } ] } tests_BalanceReport = testGroup "BalanceReport" [ let (rspec,journal) `gives` r = do let opts' = rspec{_rsQuery=And [queryFromFlags $ _rsReportOpts rspec, _rsQuery rspec]} (eitems, etotal) = r (aitems, atotal) = balanceReport opts' journal showw (acct,acct',indent,amt) = (acct, acct', indent, showMixedAmountDebug amt) (map showw aitems) @?= (map showw eitems) (showMixedAmountDebug atotal) @?= (showMixedAmountDebug etotal) in testGroup "balanceReport" [ testCase "no args, null journal" $ (defreportspec, nulljournal) `gives` ([], nullmixedamt) ,testCase "no args, sample journal" $ (defreportspec, samplejournal) `gives` ([ ("assets:bank:checking","assets:bank:checking",0, mixedAmount (usd 1)) ,("assets:bank:saving","assets:bank:saving",0, mixedAmount (usd 1)) ,("assets:cash","assets:cash",0, mixedAmount (usd (-2))) ,("expenses:food","expenses:food",0, mixedAmount (usd 1)) ,("expenses:supplies","expenses:supplies",0, mixedAmount (usd 1)) ,("income:gifts","income:gifts",0, mixedAmount (usd (-1))) ,("income:salary","income:salary",0, mixedAmount (usd (-1))) ], mixedAmount (usd 0)) ,testCase "with --tree" $ (defreportspec{_rsReportOpts=defreportopts{accountlistmode_=ALTree}}, samplejournal) `gives` ([ ("assets","assets",0, mixedAmount (usd 0)) ,("assets:bank","bank",1, mixedAmount (usd 2)) ,("assets:bank:checking","checking",2, mixedAmount (usd 1)) ,("assets:bank:saving","saving",2, mixedAmount (usd 1)) ,("assets:cash","cash",1, mixedAmount (usd (-2))) ,("expenses","expenses",0, mixedAmount (usd 2)) ,("expenses:food","food",1, mixedAmount (usd 1)) ,("expenses:supplies","supplies",1, mixedAmount (usd 1)) ,("income","income",0, mixedAmount (usd (-2))) ,("income:gifts","gifts",1, mixedAmount (usd (-1))) ,("income:salary","salary",1, mixedAmount (usd (-1))) ], mixedAmount (usd 0)) ,testCase "with --depth=N" $ (defreportspec{_rsReportOpts=defreportopts{depth_=Just 1}}, samplejournal) `gives` ([ ("expenses", "expenses", 0, mixedAmount (usd 2)) ,("income", "income", 0, mixedAmount (usd (-2))) ], mixedAmount (usd 0)) ,testCase "with depth:N" $ (defreportspec{_rsQuery=Depth 1}, samplejournal) `gives` ([ ("expenses", "expenses", 0, mixedAmount (usd 2)) ,("income", "income", 0, mixedAmount (usd (-2))) ], mixedAmount (usd 0)) ,testCase "with date:" $ (defreportspec{_rsQuery=Date $ DateSpan (Just $ Exact $ fromGregorian 2009 01 01) (Just $ Exact $ fromGregorian 2010 01 01)}, samplejournal2) `gives` ([], nullmixedamt) ,testCase "with date2:" $ (defreportspec{_rsQuery=Date2 $ DateSpan (Just $ Exact $ fromGregorian 2009 01 01) (Just $ Exact $ fromGregorian 2010 01 01)}, samplejournal2) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mixedAmount (usd 1)) ,("income:salary","income:salary",0,mixedAmount (usd (-1))) 
], mixedAmount (usd 0)) ,testCase "with desc:" $ (defreportspec{_rsQuery=Desc $ toRegexCI' "income"}, samplejournal) `gives` ([ ("assets:bank:checking","assets:bank:checking",0,mixedAmount (usd 1)) ,("income:salary","income:salary",0, mixedAmount (usd (-1))) ], mixedAmount (usd 0)) ,testCase "with not:desc:" $ (defreportspec{_rsQuery=Not . Desc $ toRegexCI' "income"}, samplejournal) `gives` ([ ("assets:bank:saving","assets:bank:saving",0, mixedAmount (usd 1)) ,("assets:cash","assets:cash",0, mixedAmount (usd (-2))) ,("expenses:food","expenses:food",0, mixedAmount (usd 1)) ,("expenses:supplies","expenses:supplies",0, mixedAmount (usd 1)) ,("income:gifts","income:gifts",0, mixedAmount (usd (-1))) ], mixedAmount (usd 0)) ,testCase "with period on a populated period" $ (defreportspec{_rsReportOpts=defreportopts{period_= PeriodBetween (fromGregorian 2008 1 1) (fromGregorian 2008 1 2)}}, samplejournal) `gives` ( [ ("assets:bank:checking","assets:bank:checking",0, mixedAmount (usd 1)) ,("income:salary","income:salary",0, mixedAmount (usd (-1))) ], mixedAmount (usd 0)) ,testCase "with period on an unpopulated period" $ (defreportspec{_rsReportOpts=defreportopts{period_= PeriodBetween (fromGregorian 2008 1 2) (fromGregorian 2008 1 3)}}, samplejournal) `gives` ([], nullmixedamt) {- ,testCase "accounts report with account pattern o" ~: defreportopts{patterns_=["o"]} `gives` [" $1 expenses:food" ," $-2 income" ," $-1 gifts" ," $-1 salary" ,"--------------------" ," $-1" ] ,testCase "accounts report with account pattern o and --depth 1" ~: defreportopts{patterns_=["o"],depth_=Just 1} `gives` [" $1 expenses" ," $-2 income" ,"--------------------" ," $-1" ] ,testCase "accounts report with account pattern a" ~: defreportopts{patterns_=["a"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $-1 income:salary" ," $1 liabilities:debts" ,"--------------------" ," $-1" ] ,testCase "accounts report with account pattern e" ~: defreportopts{patterns_=["e"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ," $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," 0" ] ,testCase "accounts report with unmatched parent of two matched subaccounts" ~: defreportopts{patterns_=["cash","saving"]} `gives` [" $-1 assets" ," $1 bank:saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,testCase "accounts report with multi-part account name" ~: defreportopts{patterns_=["expenses:food"]} `gives` [" $1 expenses:food" ,"--------------------" ," $1" ] ,testCase "accounts report with negative account pattern" ~: defreportopts{patterns_=["not:assets"]} `gives` [" $2 expenses" ," $1 food" ," $1 supplies" ," $-2 income" ," $-1 gifts" ," $-1 salary" ," $1 liabilities:debts" ,"--------------------" ," $1" ] ,testCase "accounts report negative account pattern always matches full name" ~: defreportopts{patterns_=["not:e"]} `gives` ["--------------------" ," 0" ] ,testCase "accounts report negative patterns affect totals" ~: defreportopts{patterns_=["expenses","not:food"]} `gives` [" $1 expenses:supplies" ,"--------------------" ," $1" ] ,testCase "accounts report with -E shows zero-balance accounts" ~: defreportopts{patterns_=["assets"],empty_=True} `gives` [" $-1 assets" ," $1 bank" ," 0 checking" ," $1 saving" ," $-2 cash" ,"--------------------" ," $-1" ] ,testCase "accounts report with cost basis" $ j <- (readJournal def Nothing $ unlines ["" ,"2008/1/1 test " ," a:b 10h @ $50" ," c:d " ]) >>= either error' return let j' = 
journalCanonicaliseAmounts $ journalToCost ToCost j -- enable cost basis adjustment balanceReportAsText defreportopts (balanceReport defreportopts Any j') `is` [" $500 a:b" ," $-500 c:d" ,"--------------------" ," 0" ] -} ] ] hledger-lib-1.30/Hledger/Reports/BudgetReport.hs0000644000000000000000000005632014434445206017767 0ustar0000000000000000{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} module Hledger.Reports.BudgetReport ( BudgetGoal, BudgetTotal, BudgetAverage, BudgetCell, BudgetReportRow, BudgetReport, budgetReport, budgetReportAsTable, budgetReportAsText, budgetReportAsCsv, -- * Helpers combineBudgetAndActual, -- * Tests tests_BudgetReport ) where import Control.Applicative ((<|>)) import Control.Arrow ((***)) import Data.Decimal (roundTo) import Data.Function (on) import Data.HashMap.Strict (HashMap) import qualified Data.HashMap.Strict as HM import Data.List (find, partition, transpose, foldl', maximumBy) import Data.List.Extra (nubSort) import Data.Maybe (fromMaybe, catMaybes, isJust) import Data.Map (Map) import qualified Data.Map as Map import qualified Data.Set as S import Data.Text (Text) import qualified Data.Text as T import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Safe (minimumDef) --import System.Console.CmdArgs.Explicit as C --import Lucid as L import qualified Text.Tabular.AsciiWide as Tab import Hledger.Data import Hledger.Utils import Hledger.Reports.ReportOptions import Hledger.Reports.ReportTypes import Hledger.Reports.MultiBalanceReport import Data.Ord (comparing) import Control.Monad ((>=>)) type BudgetGoal = Change type BudgetTotal = Total type BudgetAverage = Average -- | A budget report tracks expected and actual changes per account and subperiod. type BudgetCell = (Maybe Change, Maybe BudgetGoal) type BudgetReportRow = PeriodicReportRow DisplayName BudgetCell type BudgetReport = PeriodicReport DisplayName BudgetCell type BudgetDisplayCell = (WideBuilder, Maybe (WideBuilder, Maybe WideBuilder)) type BudgetDisplayRow = [BudgetDisplayCell] type BudgetShowMixed = MixedAmount -> [WideBuilder] type BudgetPercBudget = Change -> BudgetGoal -> [Maybe Percentage] -- | Calculate per-account, per-period budget (balance change) goals -- from all periodic transactions, calculate actual balance changes -- from the regular transactions, and compare these to get a 'BudgetReport'. -- Unbudgeted accounts may be hidden or renamed (see journalWithBudgetAccountNames). 
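--
-- A minimal usage sketch (illustrative only; @j@ stands for a journal that
-- contains periodic transaction rules, and this library's defaults are reused):
--
-- @
-- let reportspan = periodAsDateSpan . period_ $ _rsReportOpts defreportspec
--     br         = budgetReport defreportspec defbalancingopts reportspan j
-- in  map prrFullName (prRows br)
-- @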
budgetReport :: ReportSpec -> BalancingOpts -> DateSpan -> Journal -> BudgetReport budgetReport rspec bopts reportspan j = dbg4 "sortedbudgetreport" budgetreport where -- Budget report demands ALTree mode to ensure subaccounts and subaccount budgets are properly handled -- and that reports with and without --empty make sense when compared side by side ropts = (_rsReportOpts rspec){ accountlistmode_ = ALTree } showunbudgeted = empty_ ropts budgetedaccts = dbg3 "budgetedacctsinperiod" $ S.fromList $ expandAccountNames $ accountNamesFromPostings $ concatMap tpostings $ concatMap (\pt -> runPeriodicTransaction False pt reportspan) $ jperiodictxns j actualj = journalWithBudgetAccountNames budgetedaccts showunbudgeted j budgetj = journalAddBudgetGoalTransactions bopts ropts reportspan j priceoracle = journalPriceOracle (infer_prices_ ropts) j budgetgoalreport@(PeriodicReport _ budgetgoalitems budgetgoaltotals) = dbg5 "budgetgoalreport" $ multiBalanceReportWith rspec{_rsReportOpts=ropts{empty_=True}} budgetj priceoracle mempty budgetedacctsseen = S.fromList $ map prrFullName budgetgoalitems actualreport@(PeriodicReport actualspans _ _) = dbg5 "actualreport" $ multiBalanceReportWith rspec{_rsReportOpts=ropts{empty_=True}} actualj priceoracle budgetedacctsseen budgetgoalreport' -- If no interval is specified: -- budgetgoalreport's span might be shorter actualreport's due to periodic txns; -- it should be safe to replace it with the latter, so they combine well. | interval_ ropts == NoInterval = PeriodicReport actualspans budgetgoalitems budgetgoaltotals | otherwise = budgetgoalreport budgetreport = combineBudgetAndActual ropts j budgetgoalreport' actualreport -- | Use all (or all matched by --budget's argument) periodic transactions in the journal -- to generate budget goal transactions in the specified date span (and before, to support -- --historical. The precise start date is the natural start date of the largest interval -- of the active periodic transaction rules that is on or before the earlier of journal start date, -- report start date.) -- Budget goal transactions are similar to forecast transactions except their purpose -- and effect is to define balance change goals, per account and period, for BudgetReport. -- journalAddBudgetGoalTransactions :: BalancingOpts -> ReportOpts -> DateSpan -> Journal -> Journal journalAddBudgetGoalTransactions bopts ropts reportspan j = either error' id $ -- PARTIAL: (journalApplyCommodityStyles >=> journalBalanceTransactions bopts) j{ jtxns = budgetts } where budgetspan = dbg3 "budget span" $ DateSpan (Exact <$> mbudgetgoalsstartdate) (Exact <$> spanEnd reportspan) where mbudgetgoalsstartdate = -- We want to also generate budget goal txns before the report start date, in case -H is used. -- What should the actual starting date for goal txns be ? This gets tricky. -- Consider a journal with a "~ monthly" periodic transaction rule, where the first transaction is on 1/5. -- Users will certainly expect a budget goal for january, but "~ monthly" generates transactions -- on the first of month, and starting from 1/5 would exclude 1/1. -- Secondly, consider a rule like "~ every february 2nd from 2020/01"; we should not start that -- before 2020-02-02. 
-- Hopefully the following algorithm produces intuitive behaviour in general: -- from the earlier of the journal start date and the report start date, -- move backward to the nearest natural start date of the largest period seen among the -- active periodic transactions, unless that is disallowed by a start date in the periodic rule. -- (Do we need to pay attention to an end date in the rule ? Don't think so.) -- (So with "~ monthly", the journal start date 1/5 is adjusted to 1/1.) case minimumDef Nothing $ filter isJust [journalStartDate False j, spanStart reportspan] of Nothing -> Nothing Just d -> Just d' where -- the interval and any date span of the periodic transaction with longest period (intervl, spn) = case budgetpts of [] -> (Days 1, nulldatespan) pts -> (ptinterval pt, ptspan pt) where pt = maximumBy (comparing ptinterval) pts -- PARTIAL: maximumBy won't fail -- the natural start of this interval on or before the journal/report start intervalstart = intervalBoundaryBefore intervl d -- the natural interval start before the journal/report start, -- or the rule-specified start if later, -- but no later than the journal/report start. d' = min d $ maybe intervalstart (max intervalstart) $ spanStart spn -- select periodic transactions matching a pattern -- (the argument of the (final) --budget option). -- XXX two limitations/wishes, requiring more extensive type changes: -- - give an error if pat is non-null and matches no periodic txns -- - allow a regexp or a full hledger query, not just a substring pat = fromMaybe "" $ dbg3 "budget pattern" $ T.toLower <$> budgetpat_ ropts budgetpts = [pt | pt <- jperiodictxns j, pat `T.isInfixOf` T.toLower (ptdescription pt)] budgetts = dbg5 "budget goal txns" $ [makeBudgetTxn t | pt <- budgetpts , t <- runPeriodicTransaction False pt budgetspan ] makeBudgetTxn t = txnTieKnot $ t { tdescription = T.pack "Budget transaction" } -- | Adjust a journal's account names for budget reporting, in two ways: -- -- 1. accounts with no budget goal anywhere in their ancestry are moved -- under the "unbudgeted" top level account. -- -- 2. subaccounts with no budget goal are merged with their closest parent account -- with a budget goal, so that only budgeted accounts are shown. -- This can be disabled by -E/--empty. -- journalWithBudgetAccountNames :: S.Set AccountName -> Bool -> Journal -> Journal journalWithBudgetAccountNames budgetedaccts showunbudgeted j = dbg5With (("budget account names: "++).pshow.journalAccountNamesUsed) $ j { jtxns = remapTxn <$> jtxns j } where remapTxn = txnTieKnot . transactionTransformPostings remapPosting remapPosting p = p { paccount = remapAccount $ paccount p, poriginal = poriginal p <|> Just p } remapAccount a | a `S.member` budgetedaccts = a | Just p <- budgetedparent = if showunbudgeted then a else p | otherwise = if showunbudgeted then u <> acctsep <> a else u where budgetedparent = find (`S.member` budgetedaccts) $ parentAccountNames a u = unbudgetedAccountName -- | Combine a per-account-and-subperiod report of budget goals, and one -- of actual change amounts, into a budget performance report. -- The two reports should have the same report interval, but need not -- have exactly the same account rows or date columns. -- (Cells in the combined budget report can be missing a budget goal, -- an actual amount, or both.) 
The combined report will include: -- -- - consecutive subperiods at the same interval as the two reports, -- spanning the period of both reports -- -- - all accounts mentioned in either report, sorted by account code or -- account name or amount as appropriate. -- combineBudgetAndActual :: ReportOpts -> Journal -> MultiBalanceReport -> MultiBalanceReport -> BudgetReport combineBudgetAndActual ropts j (PeriodicReport budgetperiods budgetrows (PeriodicReportRow _ budgettots budgetgrandtot budgetgrandavg)) (PeriodicReport actualperiods actualrows (PeriodicReportRow _ actualtots actualgrandtot actualgrandavg)) = PeriodicReport periods sortedrows totalrow where periods = nubSort . filter (/= nulldatespan) $ budgetperiods ++ actualperiods -- first, combine any corresponding budget goals with actual changes rows1 = [ PeriodicReportRow acct amtandgoals totamtandgoal avgamtandgoal | PeriodicReportRow acct actualamts actualtot actualavg <- actualrows , let mbudgetgoals = HM.lookup (displayFull acct) budgetGoalsByAcct :: Maybe ([BudgetGoal], BudgetTotal, BudgetAverage) , let budgetmamts = maybe (Nothing <$ periods) (map Just . first3) mbudgetgoals :: [Maybe BudgetGoal] , let mbudgettot = second3 <$> mbudgetgoals :: Maybe BudgetTotal , let mbudgetavg = third3 <$> mbudgetgoals :: Maybe BudgetAverage , let acctBudgetByPeriod = Map.fromList [ (p,budgetamt) | (p, Just budgetamt) <- zip budgetperiods budgetmamts ] :: Map DateSpan BudgetGoal , let acctActualByPeriod = Map.fromList [ (p,actualamt) | (p, Just actualamt) <- zip actualperiods (map Just actualamts) ] :: Map DateSpan Change , let amtandgoals = [ (Map.lookup p acctActualByPeriod, Map.lookup p acctBudgetByPeriod) | p <- periods ] :: [BudgetCell] , let totamtandgoal = (Just actualtot, mbudgettot) , let avgamtandgoal = (Just actualavg, mbudgetavg) ] where budgetGoalsByAcct :: HashMap AccountName ([BudgetGoal], BudgetTotal, BudgetAverage) = HM.fromList [ (displayFull acct, (amts, tot, avg)) | PeriodicReportRow acct amts tot avg <- budgetrows ] -- next, make rows for budget goals with no actual changes rows2 = [ PeriodicReportRow acct amtandgoals totamtandgoal avgamtandgoal | PeriodicReportRow acct budgetgoals budgettot budgetavg <- budgetrows , displayFull acct `notElem` map prrFullName rows1 , let acctBudgetByPeriod = Map.fromList $ zip budgetperiods budgetgoals :: Map DateSpan BudgetGoal , let amtandgoals = [ (Nothing, Map.lookup p acctBudgetByPeriod) | p <- periods ] :: [BudgetCell] , let totamtandgoal = (Nothing, Just budgettot) , let avgamtandgoal = (Nothing, Just budgetavg) ] -- combine and re-sort rows -- TODO: add --sort-budget to sort by budget goal amount sortedrows :: [BudgetReportRow] = sortRowsLike (mbrsorted unbudgetedrows ++ mbrsorted rows') rows where (unbudgetedrows, rows') = partition ((==unbudgetedAccountName) . prrFullName) rows mbrsorted = map prrFullName . sortRows ropts j . map (fmap $ fromMaybe nullmixedamt . fst) rows = rows1 ++ rows2 totalrow = PeriodicReportRow () [ (Map.lookup p totActualByPeriod, Map.lookup p totBudgetByPeriod) | p <- periods ] ( Just actualgrandtot, budget budgetgrandtot ) ( Just actualgrandavg, budget budgetgrandavg ) where totBudgetByPeriod = Map.fromList $ zip budgetperiods budgettots :: Map DateSpan BudgetTotal totActualByPeriod = Map.fromList $ zip actualperiods actualtots :: Map DateSpan Change budget b = if mixedAmountLooksZero b then Nothing else Just b -- | Render a budget report as plain text suitable for console output. 
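--
-- A minimal usage sketch (illustrative only; @br@ stands for a 'BudgetReport'
-- produced by 'budgetReport'):
--
-- @
-- let rendered = budgetReportAsText (_rsReportOpts defreportspec) br :: TL.Text
-- @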
budgetReportAsText :: ReportOpts -> BudgetReport -> TL.Text budgetReportAsText ropts@ReportOpts{..} budgetr = TB.toLazyText $ TB.fromText title <> TB.fromText "\n\n" <> balanceReportTableAsText ropts (budgetReportAsTable ropts budgetr) where title = "Budget performance in " <> showDateSpan (periodicReportSpan budgetr) <> (case conversionop_ of Just ToCost -> ", converted to cost" _ -> "") <> (case value_ of Just (AtThen _mc) -> ", valued at posting date" Just (AtEnd _mc) -> ", valued at period ends" Just (AtNow _mc) -> ", current value" Just (AtDate d _mc) -> ", valued at " <> showDate d Nothing -> "") <> ":" -- | Build a 'Table' from a multi-column balance report. budgetReportAsTable :: ReportOpts -> BudgetReport -> Tab.Table Text Text WideBuilder budgetReportAsTable ReportOpts{..} (PeriodicReport spans items tr) = maybetransposetable $ addtotalrow $ Tab.Table (Tab.Group Tab.NoLine $ map Tab.Header accts) (Tab.Group Tab.NoLine $ map Tab.Header colheadings) rows where colheadings = ["Commodity" | layout_ == LayoutBare] ++ map (reportPeriodName balanceaccum_ spans) spans ++ [" Total" | row_total_] ++ ["Average" | average_] -- FIXME. Have to check explicitly for which to render here, since -- budgetReport sets accountlistmode to ALTree. Find a principled way to do -- this. renderacct row = case accountlistmode_ of ALTree -> T.replicate ((prrDepth row - 1)*2) " " <> prrDisplayName row ALFlat -> accountNameDrop (drop_) $ prrFullName row addtotalrow | no_total_ = id | otherwise = let rh = Tab.Group Tab.NoLine . replicate (length totalrows) $ Tab.Header "" ch = Tab.Header [] -- ignored in (flip (Tab.concatTables Tab.SingleLine) $ Tab.Table rh ch totalrows) maybetranspose | transpose_ = transpose | otherwise = id maybetransposetable | transpose_ = \(Tab.Table rh ch vals) -> Tab.Table ch rh (transpose vals) | otherwise = id (accts, rows, totalrows) = (accts', prependcs itemscs (padcells texts), prependcs trcs (padtr trtexts)) where shownitems :: [[(AccountName, WideBuilder, BudgetDisplayRow)]] shownitems = (fmap (\i -> fmap (\(cs, cvals) -> (renderacct i, cs, cvals)) . showrow $ rowToBudgetCells i) items) (accts', itemscs, texts) = unzip3 $ concat shownitems showntr :: [[(WideBuilder, BudgetDisplayRow)]] showntr = [showrow $ rowToBudgetCells tr] (trcs, trtexts) = unzip $ concat showntr trwidths | transpose_ = drop (length texts) widths | otherwise = widths padcells = maybetranspose . fmap (fmap (uncurry paddisplaycell) . zip widths) . maybetranspose padtr = maybetranspose . fmap (fmap (uncurry paddisplaycell) . zip trwidths) . maybetranspose -- commodities are shown with the amounts without `layout_ == LayoutBare` prependcs cs | layout_ /= LayoutBare = id | otherwise = zipWith (:) cs rowToBudgetCells (PeriodicReportRow _ as rowtot rowavg) = as ++ [rowtot | row_total_ && not (null as)] ++ [rowavg | average_ && not (null as)] -- functions for displaying budget cells depending on `commodity-layout_` option rowfuncs :: [CommoditySymbol] -> (BudgetShowMixed, BudgetPercBudget) rowfuncs cs = case layout_ of LayoutWide width -> ( pure . showMixedAmountB oneLine{displayColour=color_, displayMaxWidth=width} , \a -> pure . percentage a) _ -> ( showMixedAmountLinesB noPrice{displayOrder=Just cs, displayMinWidth=Nothing, displayColour=color_} , \a b -> fmap (percentage' a b) cs) showrow :: [BudgetCell] -> [(WideBuilder, BudgetDisplayRow)] showrow row = let cs = budgetCellsCommodities row (showmixed, percbudget) = rowfuncs cs in zip (fmap wbFromText cs) . transpose . 
fmap (showcell showmixed percbudget) $ row budgetCellsCommodities = S.toList . foldl' S.union mempty . fmap budgetCellCommodities budgetCellCommodities :: BudgetCell -> S.Set CommoditySymbol budgetCellCommodities (am, bm) = f am `S.union` f bm where f = maybe mempty maCommodities cellswidth :: [BudgetCell] -> [[(Int, Int, Int)]] cellswidth row = let cs = budgetCellsCommodities row (showmixed, percbudget) = rowfuncs cs disp = showcell showmixed percbudget budgetpercwidth = wbWidth *** maybe 0 wbWidth cellwidth (am, bm) = let (bw, pw) = maybe (0, 0) budgetpercwidth bm in (wbWidth am, bw, pw) in fmap (fmap cellwidth . disp) row -- build a list of widths for each column. In the case of transposed budget -- reports, the total 'row' must be included in this list widths = zip3 actualwidths budgetwidths percentwidths where actualwidths = map (maximum' . map first3 ) $ cols budgetwidths = map (maximum' . map second3) $ cols percentwidths = map (maximum' . map third3 ) $ cols catcolumnwidths = foldl' (zipWith (++)) $ repeat [] cols = maybetranspose $ catcolumnwidths $ map (cellswidth . rowToBudgetCells) items ++ [cellswidth $ rowToBudgetCells tr] -- split a BudgetCell into BudgetDisplayCell's (one per commodity when applicable) showcell :: BudgetShowMixed -> BudgetPercBudget -> BudgetCell -> BudgetDisplayRow showcell showmixed percbudget (actual, mbudget) = zip (showmixed actual') full where actual' = fromMaybe nullmixedamt actual budgetAndPerc b = zip (showmixed b) (fmap (wbFromText . T.pack . show . roundTo 0) <$> percbudget actual' b) full | Just b <- mbudget = Just <$> budgetAndPerc b | otherwise = repeat Nothing paddisplaycell :: (Int, Int, Int) -> BudgetDisplayCell -> WideBuilder paddisplaycell (actualwidth, budgetwidth, percentwidth) (actual, mbudget) = full where toPadded (WideBuilder b w) = (TB.fromText . flip T.replicate " " $ actualwidth - w) <> b (totalpercentwidth, totalbudgetwidth) = let totalpercentwidth' = if percentwidth == 0 then 0 else percentwidth + 5 in ( totalpercentwidth' , if budgetwidth == 0 then 0 else budgetwidth + totalpercentwidth' + 3 ) -- | Display a padded budget string budgetb (budget, perc) = let perct = case perc of Nothing -> T.replicate totalpercentwidth " " Just pct -> T.replicate (percentwidth - wbWidth pct) " " <> wbToText pct <> "% of " in TB.fromText $ " [" <> perct <> T.replicate (budgetwidth - wbWidth budget) " " <> wbToText budget <> "]" emptyBudget = TB.fromText $ T.replicate totalbudgetwidth " " full = flip WideBuilder (actualwidth + totalbudgetwidth) $ toPadded actual <> maybe emptyBudget budgetb mbudget -- | Calculate the percentage of actual change to budget goal to show, if any. -- If valuing at cost, both amounts are converted to cost before comparing. -- A percentage will not be shown if: -- - actual or goal are not the same, single, commodity -- - the goal is zero percentage :: Change -> BudgetGoal -> Maybe Percentage percentage actual budget = case (costedAmounts actual, costedAmounts budget) of ([a], [b]) | (acommodity a == acommodity b || amountLooksZero a) && not (amountLooksZero b) -> Just $ 100 * aquantity a / aquantity b _ -> -- trace (pshow $ (maybecost actual, maybecost budget)) -- debug missing percentage Nothing where costedAmounts = case conversionop_ of Just ToCost -> amounts . mixedAmountCost _ -> amounts -- | Calculate the percentage of actual change to budget goal for a particular commodity percentage' :: Change -> BudgetGoal -> CommoditySymbol -> Maybe Percentage percentage' am bm c = case ((,) `on` find ((==) c . acommodity) . 
amounts) am bm of (Just a, Just b) -> percentage (mixedAmount a) (mixedAmount b) _ -> Nothing -- XXX generalise this with multiBalanceReportAsCsv ? -- | Render a budget report as CSV. Like multiBalanceReportAsCsv, -- but includes alternating actual and budget amount columns. budgetReportAsCsv :: ReportOpts -> BudgetReport -> [[Text]] budgetReportAsCsv ReportOpts{..} (PeriodicReport colspans items tr) = (if transpose_ then transpose else id) $ -- heading row -- heading row ("Account" : ["Commodity" | layout_ == LayoutBare ] ++ concatMap (\spn -> [showDateSpan spn, "budget"]) colspans ++ concat [["Total" ,"budget"] | row_total_] ++ concat [["Average","budget"] | average_] ) : -- account rows -- account rows concatMap (rowAsTexts prrFullName) items -- totals row ++ concat [ rowAsTexts (const "Total:") tr | not no_total_ ] where flattentuples tups = concat [[a,b] | (a,b) <- tups] showNorm = maybe "" (wbToText . showMixedAmountB oneLine) rowAsTexts :: (PeriodicReportRow a BudgetCell -> Text) -> PeriodicReportRow a BudgetCell -> [[Text]] rowAsTexts render row@(PeriodicReportRow _ as (rowtot,budgettot) (rowavg, budgetavg)) | layout_ /= LayoutBare = [render row : fmap showNorm vals] | otherwise = joinNames . zipWith (:) cs -- add symbols and names . transpose -- each row becomes a list of Text quantities . fmap (fmap wbToText . showMixedAmountLinesB oneLine{displayOrder=Just cs, displayMinWidth=Nothing} .fromMaybe nullmixedamt) $ vals where cs = S.toList . foldl' S.union mempty . fmap maCommodities $ catMaybes vals vals = flattentuples as ++ concat [[rowtot, budgettot] | row_total_] ++ concat [[rowavg, budgetavg] | average_] joinNames = fmap (render row :) -- tests tests_BudgetReport = testGroup "BudgetReport" [ ] hledger-lib-1.30/Hledger/Reports/EntriesReport.hs0000644000000000000000000000300414434445206020155 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-| Journal entries report, used by the print command. -} module Hledger.Reports.EntriesReport ( EntriesReport, EntriesReportItem, entriesReport, -- * Tests tests_EntriesReport ) where import Data.List (sortBy) import Data.Ord (comparing) import Data.Time (fromGregorian) import Hledger.Data import Hledger.Query (Query(..)) import Hledger.Reports.ReportOptions import Hledger.Utils -- | A journal entries report is a list of whole transactions as -- originally entered in the journal (mostly). This is used by eg -- hledger's print command and hledger-web's journal entries view. type EntriesReport = [EntriesReportItem] type EntriesReportItem = Transaction -- | Select transactions for an entries report. entriesReport :: ReportSpec -> Journal -> EntriesReport entriesReport rspec@ReportSpec{_rsReportOpts=ropts} = sortBy (comparing $ transactionDateFn ropts) . jtxns . journalApplyValuationFromOpts (setDefaultConversionOp NoConversionOp rspec) . filterJournalTransactions (_rsQuery rspec) tests_EntriesReport = testGroup "EntriesReport" [ testGroup "entriesReport" [ testCase "not acct" $ (length $ entriesReport defreportspec{_rsQuery=Not . 
Acct $ toRegex' "bank"} samplejournal) @?= 1 ,testCase "date" $ (length $ entriesReport defreportspec{_rsQuery=Date $ DateSpan (Just $ Exact $ fromGregorian 2008 06 01) (Just $ Exact $ fromGregorian 2008 07 01)} samplejournal) @?= 3 ] ] hledger-lib-1.30/Hledger/Reports/MultiBalanceReport.hs0000644000000000000000000010171214434445206021111 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-| Multi-column balance reports, used by the balance command. -} module Hledger.Reports.MultiBalanceReport ( MultiBalanceReport, MultiBalanceReportRow, multiBalanceReport, multiBalanceReportWith, compoundBalanceReport, compoundBalanceReportWith, sortRows, sortRowsLike, -- * Helper functions makeReportQuery, getPostingsByColumn, getPostings, startingPostings, generateMultiBalanceReport, balanceReportTableAsText, -- -- * Tests tests_MultiBalanceReport ) where import Control.Monad (guard) import Data.Bifunctor (second) import Data.Foldable (toList) import Data.List (sortOn, transpose) import Data.List.NonEmpty (NonEmpty(..)) import Data.HashMap.Strict (HashMap) import qualified Data.HashMap.Strict as HM import Data.Map (Map) import qualified Data.Map as M import Data.Maybe (fromMaybe, isJust, mapMaybe) import Data.Ord (Down(..)) import Data.Semigroup (sconcat) import Data.Set (Set) import qualified Data.Set as Set import Data.Time.Calendar (fromGregorian) import Safe (lastDef, minimumMay) import Data.Default (def) import qualified Data.Text as T import qualified Data.Text.Lazy.Builder as TB import qualified Text.Tabular.AsciiWide as Tab import Hledger.Data import Hledger.Query import Hledger.Utils hiding (dbg3,dbg4,dbg5) import qualified Hledger.Utils import Hledger.Reports.ReportOptions import Hledger.Reports.ReportTypes -- add a prefix to this function's debug output dbg3 s = let p = "multiBalanceReport" in Hledger.Utils.dbg3 (p++" "++s) dbg4 s = let p = "multiBalanceReport" in Hledger.Utils.dbg4 (p++" "++s) dbg5 s = let p = "multiBalanceReport" in Hledger.Utils.dbg5 (p++" "++s) -- | A multi balance report is a kind of periodic report, where the amounts -- correspond to balance changes or ending balances in a given period. It has: -- -- 1. a list of each column's period (date span) -- -- 2. a list of rows, each containing: -- -- * the full account name, display name, and display depth -- -- * A list of amounts, one for each column. -- -- * the total of the row's amounts for a periodic report -- -- * the average of the row's amounts -- -- 3. the column totals, and the overall grand total (or zero for -- cumulative/historical reports) and grand average. type MultiBalanceReport = PeriodicReport DisplayName MixedAmount type MultiBalanceReportRow = PeriodicReportRow DisplayName MixedAmount -- type alias just to remind us which AccountNames might be depth-clipped, below. type ClippedAccountName = AccountName -- | Generate a multicolumn balance report for the matched accounts, -- showing the change of balance, accumulated balance, or historical balance -- in each of the specified periods. If the normalbalance_ option is set, it -- adjusts the sorting and sign of amounts (see ReportOpts and -- CompoundBalanceCommand). hledger's most powerful and useful report, used -- by the balance command (in multiperiod mode) and (via compoundBalanceReport) -- by the bs/cf/is commands. 
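--
-- A minimal usage sketch (illustrative only, using this library's test helpers):
--
-- @
-- let r = multiBalanceReport defreportspec samplejournal
-- in  ( periodicReportSpan r
--     , map prrFullName (prRows r)
--     , prrTotal (prTotals r) )
-- @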
multiBalanceReport :: ReportSpec -> Journal -> MultiBalanceReport multiBalanceReport rspec j = multiBalanceReportWith rspec j (journalPriceOracle infer j) mempty where infer = infer_prices_ $ _rsReportOpts rspec -- | A helper for multiBalanceReport. This one takes some extra arguments, -- a 'PriceOracle' to be used for looking up market prices, and a set of -- 'AccountName's which should not be elided. Commands which run multiple -- reports (bs etc.) can generate the price oracle just once for efficiency, -- passing it to each report by calling this function directly. multiBalanceReportWith :: ReportSpec -> Journal -> PriceOracle -> Set AccountName -> MultiBalanceReport multiBalanceReportWith rspec' j priceoracle unelidableaccts = report where -- Queries, report/column dates. (reportspan, colspans) = reportSpan j rspec' rspec = dbg3 "reportopts" $ makeReportQuery rspec' reportspan -- Group postings into their columns. colps = dbg5 "colps" $ getPostingsByColumn rspec j priceoracle colspans -- The matched accounts with a starting balance. All of these should appear -- in the report, even if they have no postings during the report period. startbals = dbg5 "startbals" $ startingBalances rspec j priceoracle $ startingPostings rspec j priceoracle reportspan -- Generate and postprocess the report, negating balances and taking percentages if needed report = dbg4 "multiBalanceReportWith" $ generateMultiBalanceReport rspec j priceoracle unelidableaccts colps startbals -- | Generate a compound balance report from a list of CBCSubreportSpec. This -- shares postings between the subreports. compoundBalanceReport :: ReportSpec -> Journal -> [CBCSubreportSpec a] -> CompoundPeriodicReport a MixedAmount compoundBalanceReport rspec j = compoundBalanceReportWith rspec j (journalPriceOracle infer j) where infer = infer_prices_ $ _rsReportOpts rspec -- | A helper for compoundBalanceReport, similar to multiBalanceReportWith. compoundBalanceReportWith :: ReportSpec -> Journal -> PriceOracle -> [CBCSubreportSpec a] -> CompoundPeriodicReport a MixedAmount compoundBalanceReportWith rspec' j priceoracle subreportspecs = cbr where -- Queries, report/column dates. (reportspan, colspans) = reportSpan j rspec' rspec = dbg3 "reportopts" $ makeReportQuery rspec' reportspan -- Group postings into their columns. colps = dbg5 "colps" $ getPostingsByColumn rspec j priceoracle colspans -- The matched postings with a starting balance. All of these should appear -- in the report, even if they have no postings during the report period. startps = dbg5 "startps" $ startingPostings rspec j priceoracle reportspan subreports = map generateSubreport subreportspecs where generateSubreport CBCSubreportSpec{..} = ( cbcsubreporttitle -- Postprocess the report, negating balances and taking percentages if needed , cbcsubreporttransform $ generateMultiBalanceReport rspecsub j priceoracle mempty colps' startbals' , cbcsubreportincreasestotal ) where ropts = cbcsubreportoptions $ _rsReportOpts rspec -- Add a restriction to this subreport to the report query. -- XXX in non-thorough way, consider updateReportSpec ? rspecsub = rspec{_rsReportOpts=ropts, _rsQuery=And [cbcsubreportquery, _rsQuery rspec]} -- Starting balances and column postings specific to this subreport. startbals' = startingBalances rspecsub j priceoracle $ filter (matchesPostingExtra (journalAccountType j) cbcsubreportquery) startps colps' = map (second $ filter (matchesPostingExtra (journalAccountType j) cbcsubreportquery)) colps -- Sum the subreport totals by column. 
Handle these cases: -- - no subreports -- - empty subreports, having no subtotals (#588) -- - subreports with a shorter subtotals row than the others overalltotals = case subreports of [] -> PeriodicReportRow () [] nullmixedamt nullmixedamt (r:rs) -> sconcat $ fmap subreportTotal (r:|rs) where subreportTotal (_, sr, increasestotal) = (if increasestotal then id else fmap maNegate) $ prTotals sr cbr = CompoundPeriodicReport "" (map fst colps) subreports overalltotals -- XXX seems refactorable -- | Calculate accounts' balances on the report start date, from these postings -- which should be all postings before that date, and possibly also from account declarations. startingBalances :: ReportSpec -> Journal -> PriceOracle -> [Posting] -> HashMap AccountName Account startingBalances rspec j priceoracle ps = M.findWithDefault nullacct emptydatespan <$> calculateReportMatrix rspec j priceoracle mempty [(emptydatespan, ps)] -- | Postings needed to calculate starting balances. -- -- Balances at report start date, from all earlier postings which otherwise match the query. -- These balances are unvalued. -- TODO: Do we want to check whether to bother calculating these? isHistorical -- and startDate is not nothing, otherwise mempty? This currently gives a -- failure with some totals which are supposed to be 0 being blank. startingPostings :: ReportSpec -> Journal -> PriceOracle -> DateSpan -> [Posting] startingPostings rspec@ReportSpec{_rsQuery=query,_rsReportOpts=ropts} j priceoracle reportspan = getPostings rspec' j priceoracle where rspec' = rspec{_rsQuery=startbalq,_rsReportOpts=ropts'} -- If we're re-valuing every period, we need to have the unvalued start -- balance, so we can do it ourselves later. ropts' = case value_ ropts of Just (AtEnd _) -> ropts{period_=precedingperiod, value_=Nothing} _ -> ropts{period_=precedingperiod} -- q projected back before the report start date. -- When there's no report start date, in case there are future txns (the hledger-ui case above), -- we use emptydatespan to make sure they aren't counted as starting balance. startbalq = dbg3 "startbalq" $ And [datelessq, precedingspanq] datelessq = dbg3 "datelessq" $ filterQuery (not . queryIsDateOrDate2) query precedingperiod = dateSpanAsPeriod . spanIntersect precedingspan . periodAsDateSpan $ period_ ropts precedingspan = DateSpan Nothing (Exact <$> spanStart reportspan) precedingspanq = (if date2_ ropts then Date2 else Date) $ case precedingspan of DateSpan Nothing Nothing -> emptydatespan a -> a -- | Remove any date queries and insert queries from the report span. -- The user's query expanded to the report span -- if there is one (otherwise any date queries are left as-is, which -- handles the hledger-ui+future txns case above). makeReportQuery :: ReportSpec -> DateSpan -> ReportSpec makeReportQuery rspec reportspan | reportspan == nulldatespan = rspec | otherwise = rspec{_rsQuery=query} where query = simplifyQuery $ And [dateless $ _rsQuery rspec, reportspandatesq] reportspandatesq = dbg3 "reportspandatesq" $ dateqcons reportspan dateless = dbg3 "dateless" . filterQuery (not . queryIsDateOrDate2) dateqcons = if date2_ (_rsReportOpts rspec) then Date2 else Date -- | Group postings, grouped by their column getPostingsByColumn :: ReportSpec -> Journal -> PriceOracle -> [DateSpan] -> [(DateSpan, [Posting])] getPostingsByColumn rspec j priceoracle colspans = groupByDateSpan True getDate colspans ps where -- Postings matching the query within the report period. 
ps = dbg5 "getPostingsByColumn" $ getPostings rspec j priceoracle -- The date spans to be included as report columns. getDate = postingDateOrDate2 (whichDate (_rsReportOpts rspec)) -- | Gather postings matching the query within the report period. getPostings :: ReportSpec -> Journal -> PriceOracle -> [Posting] getPostings rspec@ReportSpec{_rsQuery=query, _rsReportOpts=ropts} j priceoracle = journalPostings $ journalValueAndFilterPostingsWith rspec' j priceoracle where rspec' = rspec{_rsQuery=depthless, _rsReportOpts = ropts'} ropts' = if isJust (valuationAfterSum ropts) then ropts{value_=Nothing, conversionop_=Just NoConversionOp} -- If we're valuing after the sum, don't do it now else ropts -- The user's query with no depth limit, and expanded to the report span -- if there is one (otherwise any date queries are left as-is, which -- handles the hledger-ui+future txns case above). depthless = dbg3 "depthless" $ filterQuery (not . queryIsDepth) query -- | From set of postings, eg for a single report column, calculate the balance change in each account. -- Accounts and amounts will be depth-clipped appropriately if a depth limit is in effect. -- -- When --declared is used, accounts which have been declared with an account directive -- are also included, with a 0 balance change. But only leaf accounts, since non-leaf -- empty declared accounts are less useful in reports. This is primarily for hledger-ui. acctChanges :: ReportSpec -> Journal -> [Posting] -> HashMap ClippedAccountName Account acctChanges ReportSpec{_rsQuery=query,_rsReportOpts=ReportOpts{accountlistmode_, declared_}} j ps = HM.fromList [(aname a, a) | a <- accts] where -- With --declared, add the query-matching declared accounts -- (as dummy postings so they are processed like the rest). -- This function is used for calculating both pre-start changes and column changes, -- and the declared accounts are really only needed for the former, -- but it's harmless to have them in the column changes as well. ps' = ps ++ if declared_ then declaredacctps else [] where declaredacctps = [nullposting{paccount=a} | a <- journalLeafAccountNamesDeclared j , matchesAccountExtra (journalAccountType j) (journalAccountTags j) accttypetagsq a ] where accttypetagsq = dbg3 "accttypetagsq" $ filterQueryOrNotQuery (\q -> queryIsAcct q || queryIsType q || queryIsTag q) query filterbydepth = case accountlistmode_ of ALTree -> filter ((depthq `matchesAccount`) . aname) -- a tree - just exclude deeper accounts ALFlat -> clipAccountsAndAggregate (queryDepth depthq) -- a list - aggregate deeper accounts at the depth limit . filter ((0<) . anumpostings) -- and exclude empty parent accounts where depthq = dbg3 "depthq" $ filterQuery queryIsDepth query accts = filterbydepth $ drop 1 $ accountsFromPostings ps' -- | Gather the account balance changes into a regular matrix, then -- accumulate and value amounts, as specified by the report options. -- Makes sure all report columns have an entry. calculateReportMatrix :: ReportSpec -> Journal -> PriceOracle -> HashMap ClippedAccountName Account -> [(DateSpan, [Posting])] -> HashMap ClippedAccountName (Map DateSpan Account) calculateReportMatrix rspec@ReportSpec{_rsReportOpts=ropts} j priceoracle startbals colps = -- PARTIAL: -- Ensure all columns have entries, including those with starting balances HM.mapWithKey rowbals allchanges where -- The valued row amounts to be displayed: per-period changes, -- zero-based cumulative totals, or -- starting-balance-based historical balances. 
rowbals name unvaluedChanges = dbg5 "rowbals" $ case balanceaccum_ ropts of PerPeriod -> changes Cumulative -> cumulative Historical -> historical where -- changes to report on: usually just the valued changes themselves, but use the -- differences in the valued historical amount for CalcValueChange and CalcGain. changes = case balancecalc_ ropts of CalcChange -> M.mapWithKey avalue unvaluedChanges CalcBudget -> M.mapWithKey avalue unvaluedChanges CalcValueChange -> periodChanges valuedStart historical CalcGain -> periodChanges valuedStart historical CalcPostingsCount -> M.mapWithKey avalue unvaluedChanges -- the historical balance is the valued cumulative sum of all unvalued changes historical = M.mapWithKey avalue $ cumulativeSum startingBalance unvaluedChanges -- since this is a cumulative sum of valued amounts, it should not be valued again cumulative = cumulativeSum nullacct changes startingBalance = HM.lookupDefault nullacct name startbals valuedStart = avalue (DateSpan Nothing (Exact <$> historicalDate)) startingBalance -- In each column, get each account's balance changes colacctchanges = dbg5 "colacctchanges" $ map (second $ acctChanges rspec j) colps :: [(DateSpan, HashMap ClippedAccountName Account)] -- Transpose it to get each account's balance changes across all columns acctchanges = dbg5 "acctchanges" $ transposeMap colacctchanges :: HashMap AccountName (Map DateSpan Account) -- Fill out the matrix with zeros in empty cells allchanges = ((<>zeros) <$> acctchanges) <> (zeros <$ startbals) avalue = acctApplyBoth . mixedAmountApplyValuationAfterSumFromOptsWith ropts j priceoracle acctApplyBoth f a = a{aibalance = f $ aibalance a, aebalance = f $ aebalance a} historicalDate = minimumMay $ mapMaybe spanStart colspans zeros = M.fromList [(spn, nullacct) | spn <- colspans] colspans = map fst colps -- | Lay out a set of postings grouped by date span into a regular matrix with rows -- given by AccountName and columns by DateSpan, then generate a MultiBalanceReport -- from the columns. generateMultiBalanceReport :: ReportSpec -> Journal -> PriceOracle -> Set AccountName -> [(DateSpan, [Posting])] -> HashMap AccountName Account -> MultiBalanceReport generateMultiBalanceReport rspec@ReportSpec{_rsReportOpts=ropts} j priceoracle unelidableaccts colps0 startbals = report where -- If doing --count, set all posting amounts to "1". colps = if balancecalc_ ropts == CalcPostingsCount then map (second (map (postingTransformAmount (const $ mixed [num 1])))) colps0 else colps0 -- Process changes into normal, cumulative, or historical amounts, plus value them matrix = calculateReportMatrix rspec j priceoracle startbals colps -- All account names that will be displayed, possibly depth-clipped. displaynames = dbg5 "displaynames" $ displayedAccounts rspec unelidableaccts matrix -- All the rows of the report. rows = dbg5 "rows" . (if invert_ ropts then map (fmap maNegate) else id) -- Negate amounts if applicable $ buildReportRows ropts displaynames matrix -- Calculate column totals totalsrow = dbg5 "totalsrow" $ calculateTotalsRow ropts rows -- Sorted report rows. sortedrows = dbg5 "sortedrows" $ sortRows ropts j rows -- Take percentages if needed report = reportPercent ropts $ PeriodicReport (map fst colps) sortedrows totalsrow -- | Build the report rows. -- One row per account, with account name info, row amounts, row total and row average. -- Rows are unsorted. 
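--
-- (Within 'generateMultiBalanceReport' above, this is called roughly as
-- @buildReportRows ropts displaynames matrix@, where @displaynames@ comes from
-- 'displayedAccounts' and @matrix@ from 'calculateReportMatrix'.)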
buildReportRows :: ReportOpts -> HashMap AccountName DisplayName -> HashMap AccountName (Map DateSpan Account) -> [MultiBalanceReportRow] buildReportRows ropts displaynames = toList . HM.mapMaybeWithKey mkRow -- toList of HashMap's Foldable instance - does not sort consistently where mkRow name accts = do displayname <- HM.lookup name displaynames return $ PeriodicReportRow displayname rowbals rowtot rowavg where rowbals = map balance $ toList accts -- toList of Map's Foldable instance - does sort by key -- The total and average for the row. -- These are always simply the sum/average of the displayed row amounts. -- Total for a cumulative/historical report is always the last column. rowtot = case balanceaccum_ ropts of PerPeriod -> maSum rowbals _ -> lastDef nullmixedamt rowbals rowavg = averageMixedAmounts rowbals balance = case accountlistmode_ ropts of ALTree -> aibalance; ALFlat -> aebalance -- | Calculate accounts which are to be displayed in the report, as well as -- their name and depth displayedAccounts :: ReportSpec -> Set AccountName -> HashMap AccountName (Map DateSpan Account) -> HashMap AccountName DisplayName displayedAccounts ReportSpec{_rsQuery=query,_rsReportOpts=ropts} unelidableaccts valuedaccts | qdepth == 0 = HM.singleton "..." $ DisplayName "..." "..." 1 | otherwise = HM.mapWithKey (\a _ -> displayedName a) displayedAccts where -- Accounts which are to be displayed displayedAccts = (if qdepth == 0 then id else HM.filterWithKey keep) valuedaccts where keep name amts = isInteresting name amts || name `HM.member` interestingParents displayedName name = case accountlistmode_ ropts of ALTree -> DisplayName name leaf . max 0 $ level - boringParents ALFlat -> DisplayName name droppedName 1 where droppedName = accountNameDrop (drop_ ropts) name leaf = accountNameFromComponents . reverse . map accountLeafName $ droppedName : takeWhile notDisplayed parents level = max 0 $ accountNameLevel name - drop_ ropts parents = take (level - 1) $ parentAccountNames name boringParents = if no_elide_ ropts then 0 else length $ filter notDisplayed parents notDisplayed = not . (`HM.member` displayedAccts) -- Accounts interesting for their own sake isInteresting name amts = d <= qdepth -- Throw out anything too deep && ( name `Set.member` unelidableaccts -- Unelidable accounts should be kept unless too deep ||(empty_ ropts && keepWhenEmpty amts) -- Keep empty accounts when called with --empty || not (isZeroRow balance amts) -- Keep everything with a non-zero balance in the row ) where d = accountNameLevel name keepWhenEmpty = case accountlistmode_ ropts of ALFlat -> const True -- Keep all empty accounts in flat mode ALTree -> all (null . asubs) -- Keep only empty leaves in tree mode balance = maybeStripPrices . case accountlistmode_ ropts of ALTree | d == qdepth -> aibalance _ -> aebalance where maybeStripPrices = if conversionop_ ropts == Just NoConversionOp then id else mixedAmountStripPrices -- Accounts interesting because they are a fork for interesting subaccounts interestingParents = dbg5 "interestingParents" $ case accountlistmode_ ropts of ALTree -> HM.filterWithKey hasEnoughSubs numSubs ALFlat -> mempty where hasEnoughSubs name nsubs = nsubs >= minSubs && accountNameLevel name > drop_ ropts minSubs = if no_elide_ ropts then 1 else 2 isZeroRow balance = all (mixedAmountLooksZero . balance) qdepth = fromMaybe maxBound $ queryDepth query numSubs = subaccountTallies . HM.keys $ HM.filterWithKey isInteresting valuedaccts -- | Sort the rows by amount or by account declaration order. 
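-- For example (illustrative): with --sort-amount in flat mode, normally-positive rows
-- with totals 30, 20 and 20 are ordered 30 first, then the two 20s by account name;
-- in tree mode, siblings are ordered by row total at each level of the account tree.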
sortRows :: ReportOpts -> Journal -> [MultiBalanceReportRow] -> [MultiBalanceReportRow] sortRows ropts j | sort_amount_ ropts, ALTree <- accountlistmode_ ropts = sortTreeMBRByAmount | sort_amount_ ropts, ALFlat <- accountlistmode_ ropts = sortFlatMBRByAmount | otherwise = sortMBRByAccountDeclaration where -- Sort the report rows, representing a tree of accounts, by row total at each level. -- Similar to sortMBRByAccountDeclaration/sortAccountNamesByDeclaration. sortTreeMBRByAmount :: [MultiBalanceReportRow] -> [MultiBalanceReportRow] sortTreeMBRByAmount rows = mapMaybe (`HM.lookup` rowMap) sortedanames where accounttree = accountTree "root" $ map prrFullName rows rowMap = HM.fromList $ map (\row -> (prrFullName row, row)) rows -- Set the inclusive balance of an account from the rows, or sum the -- subaccounts if it's not present accounttreewithbals = mapAccounts setibalance accounttree setibalance a = a{aibalance = maybe (maSum . map aibalance $ asubs a) prrTotal $ HM.lookup (aname a) rowMap} sortedaccounttree = sortAccountTreeByAmount (fromMaybe NormallyPositive $ normalbalance_ ropts) accounttreewithbals sortedanames = map aname $ drop 1 $ flattenAccounts sortedaccounttree -- Sort the report rows, representing a flat account list, by row total (and then account name). sortFlatMBRByAmount :: [MultiBalanceReportRow] -> [MultiBalanceReportRow] sortFlatMBRByAmount = case fromMaybe NormallyPositive $ normalbalance_ ropts of NormallyPositive -> sortOn (\r -> (Down $ amt r, prrFullName r)) NormallyNegative -> sortOn (\r -> (amt r, prrFullName r)) where amt = mixedAmountStripPrices . prrTotal -- Sort the report rows by account declaration order then account name. sortMBRByAccountDeclaration :: [MultiBalanceReportRow] -> [MultiBalanceReportRow] sortMBRByAccountDeclaration rows = sortRowsLike sortedanames rows where sortedanames = sortAccountNamesByDeclaration j (tree_ ropts) $ map prrFullName rows -- | Build the report totals row. -- -- Calculate the column totals. These are always the sum of column amounts. calculateTotalsRow :: ReportOpts -> [MultiBalanceReportRow] -> PeriodicReportRow () MixedAmount calculateTotalsRow ropts rows = PeriodicReportRow () coltotals grandtotal grandaverage where isTopRow row = flat_ ropts || not (any (`HM.member` rowMap) parents) where parents = init . expandAccountName $ prrFullName row rowMap = HM.fromList $ map (\row -> (prrFullName row, row)) rows colamts = transpose . map prrAmounts $ filter isTopRow rows coltotals :: [MixedAmount] = dbg5 "coltotals" $ map maSum colamts -- Calculate the grand total and average. These are always the sum/average -- of the column totals. -- Total for a cumulative/historical report is always the last column. grandtotal = case balanceaccum_ ropts of PerPeriod -> maSum coltotals _ -> lastDef nullmixedamt coltotals grandaverage = averageMixedAmounts coltotals -- | Map the report rows to percentages if needed reportPercent :: ReportOpts -> MultiBalanceReport -> MultiBalanceReport reportPercent ropts report@(PeriodicReport spans rows totalrow) | percent_ ropts = PeriodicReport spans (map percentRow rows) (percentRow totalrow) | otherwise = report where percentRow (PeriodicReportRow name rowvals rowtotal rowavg) = PeriodicReportRow name (zipWith perdivide rowvals $ prrAmounts totalrow) (perdivide rowtotal $ prrTotal totalrow) (perdivide rowavg $ prrAverage totalrow) -- | Transpose a Map of HashMaps to a HashMap of Maps. -- -- Makes sure that all DateSpans are present in all rows. 
transposeMap :: [(DateSpan, HashMap AccountName a)] -> HashMap AccountName (Map DateSpan a) transposeMap = foldr (uncurry addSpan) mempty where addSpan spn acctmap seen = HM.foldrWithKey (addAcctSpan spn) seen acctmap addAcctSpan spn acct a = HM.alter f acct where f = Just . M.insert spn a . fromMaybe mempty -- | A sorting helper: sort a list of things (eg report rows) keyed by account name -- to match the provided ordering of those same account names. sortRowsLike :: [AccountName] -> [PeriodicReportRow DisplayName b] -> [PeriodicReportRow DisplayName b] sortRowsLike sortedas rows = mapMaybe (`HM.lookup` rowMap) sortedas where rowMap = HM.fromList $ map (\row -> (prrFullName row, row)) rows -- | Given a list of account names, find all forking parent accounts, i.e. -- those which fork between different branches subaccountTallies :: [AccountName] -> HashMap AccountName Int subaccountTallies = foldr incrementParent mempty . expandAccountNames where incrementParent a = HM.insertWith (+) (parentAccountName a) 1 -- | A helper: what percentage is the second mixed amount of the first ? -- Keeps the sign of the first amount. -- Uses unifyMixedAmount to unify each argument and then divides them. -- Both amounts should be in the same, single commodity. -- This can call error if the arguments are not right. perdivide :: MixedAmount -> MixedAmount -> MixedAmount perdivide a b = fromMaybe (error' errmsg) $ do -- PARTIAL: a' <- unifyMixedAmount a b' <- unifyMixedAmount b guard $ amountIsZero a' || amountIsZero b' || acommodity a' == acommodity b' return $ mixed [per $ if aquantity b' == 0 then 0 else aquantity a' / abs (aquantity b') * 100] where errmsg = "Cannot calculate percentages if accounts have different commodities (Hint: Try --cost, -V or similar flags.)" -- Add the values of two accounts. Should be right-biased, since it's used -- in scanl, so other properties (such as anumpostings) stay in the right place sumAcct :: Account -> Account -> Account sumAcct Account{aibalance=i1,aebalance=e1} a@Account{aibalance=i2,aebalance=e2} = a{aibalance = i1 `maPlus` i2, aebalance = e1 `maPlus` e2} -- Subtract the values in one account from another. Should be left-biased. subtractAcct :: Account -> Account -> Account subtractAcct a@Account{aibalance=i1,aebalance=e1} Account{aibalance=i2,aebalance=e2} = a{aibalance = i1 `maMinus` i2, aebalance = e1 `maMinus` e2} -- | Extract period changes from a cumulative list periodChanges :: Account -> Map k Account -> Map k Account periodChanges start amtmap = M.fromDistinctAscList . zip dates $ zipWith subtractAcct amts (start:amts) where (dates, amts) = unzip $ M.toAscList amtmap -- | Calculate a cumulative sum from a list of period changes. cumulativeSum :: Account -> Map DateSpan Account -> Map DateSpan Account cumulativeSum start = snd . M.mapAccum (\a b -> let s = sumAcct a b in (s, s)) start -- | Given a table representing a multi-column balance report (for example, -- made using 'balanceReportAsTable'), render it in a format suitable for -- console output. Amounts with more than two commodities will be elided -- unless --no-elide is used. 
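-- For example, with --layout=bare (and without --transpose) the first cell of each row
-- is left-aligned and the remaining cells right-aligned; in all other layouts every
-- cell is right-aligned beside the left-aligned row header.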
balanceReportTableAsText :: ReportOpts -> Tab.Table T.Text T.Text WideBuilder -> TB.Builder balanceReportTableAsText ReportOpts{..} = Tab.renderTableByRowsB def{Tab.tableBorders=False, Tab.prettyTable=pretty_} renderCh renderRow where renderCh | layout_ /= LayoutBare || transpose_ = fmap (Tab.textCell Tab.TopRight) | otherwise = zipWith ($) (Tab.textCell Tab.TopLeft : repeat (Tab.textCell Tab.TopRight)) renderRow (rh, row) | layout_ /= LayoutBare || transpose_ = (Tab.textCell Tab.TopLeft rh, fmap (Tab.Cell Tab.TopRight . pure) row) | otherwise = (Tab.textCell Tab.TopLeft rh, zipWith ($) (Tab.Cell Tab.TopLeft : repeat (Tab.Cell Tab.TopRight)) (fmap pure row)) -- tests tests_MultiBalanceReport = testGroup "MultiBalanceReport" [ let amt0 = Amount {acommodity="$", aquantity=0, aprice=Nothing, astyle=AmountStyle {ascommodityside = L, ascommodityspaced = False, asprecision = Precision 2, asdecimalpoint = Just '.', asdigitgroups = Nothing}} (rspec,journal) `gives` r = do let rspec' = rspec{_rsQuery=And [queryFromFlags $ _rsReportOpts rspec, _rsQuery rspec]} (eitems, etotal) = r (PeriodicReport _ aitems atotal) = multiBalanceReport rspec' journal showw (PeriodicReportRow a lAmt amt amt') = (displayFull a, displayName a, displayDepth a, map showMixedAmountDebug lAmt, showMixedAmountDebug amt, showMixedAmountDebug amt') (map showw aitems) @?= (map showw eitems) showMixedAmountDebug (prrTotal atotal) @?= showMixedAmountDebug etotal -- we only check the sum of the totals in testGroup "multiBalanceReport" [ testCase "null journal" $ (defreportspec, nulljournal) `gives` ([], nullmixedamt) ,testCase "with -H on a populated period" $ (defreportspec{_rsReportOpts=defreportopts{period_= PeriodBetween (fromGregorian 2008 1 1) (fromGregorian 2008 1 2), balanceaccum_=Historical}}, samplejournal) `gives` ( [ PeriodicReportRow (flatDisplayName "assets:bank:checking") [mixedAmount $ usd 1] (mixedAmount $ usd 1) (mixedAmount amt0{aquantity=1}) , PeriodicReportRow (flatDisplayName "income:salary") [mixedAmount $ usd (-1)] (mixedAmount $ usd (-1)) (mixedAmount amt0{aquantity=(-1)}) ], mixedAmount $ usd 0) -- ,testCase "a valid history on an empty period" $ -- (defreportopts{period_= PeriodBetween (fromGregorian 2008 1 2) (fromGregorian 2008 1 3), balanceaccum_=Historical}, samplejournal) `gives` -- ( -- [ -- ("assets:bank:checking","checking",3, [mamountp' "$1.00"], mamountp' "$1.00",mixedAmount amt0 {aquantity=1}) -- ,("income:salary","salary",2, [mamountp' "$-1.00"], mamountp' "$-1.00",mixedAmount amt0 {aquantity=(-1)}) -- ], -- mixedAmount usd0) -- ,testCase "a valid history on an empty period (more complex)" $ -- (defreportopts{period_= PeriodBetween (fromGregorian 2009 1 1) (fromGregorian 2009 1 2), balanceaccum_=Historical}, samplejournal) `gives` -- ( -- [ -- ("assets:bank:checking","checking",3, [mamountp' "$1.00"], mamountp' "$1.00",mixedAmount amt0 {aquantity=1}) -- ,("assets:bank:saving","saving",3, [mamountp' "$1.00"], mamountp' "$1.00",mixedAmount amt0 {aquantity=1}) -- ,("assets:cash","cash",2, [mamountp' "$-2.00"], mamountp' "$-2.00",mixedAmount amt0 {aquantity=(-2)}) -- ,("expenses:food","food",2, [mamountp' "$1.00"], mamountp' "$1.00",mixedAmount amt0 {aquantity=(1)}) -- ,("expenses:supplies","supplies",2, [mamountp' "$1.00"], mamountp' "$1.00",mixedAmount amt0 {aquantity=(1)}) -- ,("income:gifts","gifts",2, [mamountp' "$-1.00"], mamountp' "$-1.00",mixedAmount amt0 {aquantity=(-1)}) -- ,("income:salary","salary",2, [mamountp' "$-1.00"], mamountp' "$-1.00",mixedAmount amt0 {aquantity=(-1)}) -- ], -- 
mixedAmount usd0) ] ] hledger-lib-1.30/Hledger/Reports/PostingsReport.hs0000644000000000000000000005544514434445206020362 0ustar0000000000000000{-| Postings report, used by the register command. -} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} module Hledger.Reports.PostingsReport ( PostingsReport, PostingsReportItem, postingsReport, mkpostingsReportItem, -- * Tests tests_PostingsReport ) where import Data.List (nub, sortOn) import Data.List.Extra (nubSort) import Data.Maybe (isJust, isNothing) import Data.Text (Text) import Data.Time.Calendar (Day) import Safe (headMay) import Hledger.Data import Hledger.Query import Hledger.Utils import Hledger.Reports.ReportOptions -- | A postings report is a list of postings with a running total, and a little extra -- transaction info to help with rendering. -- This is used eg for the register command. type PostingsReport = [PostingsReportItem] -- line items, one per posting type PostingsReportItem = (Maybe Day -- The posting date, if this is the first posting in a -- transaction or if it's different from the previous -- posting's date. Or if this is a summary posting, the -- report interval's start date if this is the first -- summary posting in the interval. ,Maybe Period -- If this is a summary posting, the report interval's period. ,Maybe Text -- The posting's transaction's description, if this is the first posting in the transaction. ,Posting -- The posting, possibly with the account name depth-clipped. ,MixedAmount -- The running total after this posting, or with --average, -- the running average posting amount. With --historical, -- postings before the report start date are included in -- the running total/average. ) -- | A summary posting summarises the activity in one account within a report -- interval. It is represented by a regular Posting with no description, the interval's -- start date stored as the posting date, and the interval's Period attached -- with a tuple. type SummaryPosting = (Posting, Period) -- | Select postings from the journal and add running balance and other -- information to make a postings report. Used by eg hledger's register command. postingsReport :: ReportSpec -> Journal -> PostingsReport postingsReport rspec@ReportSpec{_rsReportOpts=ropts@ReportOpts{..}} j = items where (reportspan, colspans) = reportSpanBothDates j rspec whichdate = whichDate ropts mdepth = queryDepth $ _rsQuery rspec multiperiod = interval_ /= NoInterval -- postings to be included in the report, and similarly-matched postings before the report start date (precedingps, reportps) = matchedPostingsBeforeAndDuring rspec j reportspan -- Postings, or summary postings with their subperiod's end date, to be displayed. displayps :: [(Posting, Maybe Period)] | multiperiod = [(p', Just period') | (p', period') <- summariseps reportps] | otherwise = [(p', Nothing) | p' <- reportps] where summariseps = summarisePostingsByInterval whichdate mdepth showempty colspans showempty = empty_ || average_ -- Posting report items ready for display. items = dbg4 "postingsReport items" $ postingsReportItems displayps (nullposting,Nothing) whichdate mdepth startbal runningcalc startnum where -- In historical mode we'll need a starting balance, which we -- may be converting to value per hledger_options.m4.md "Effect -- of --value on reports". -- XXX balance report doesn't value starting balance.. should this ?
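-- A sketch of the starting values (assuming --historical): if the postings before the
-- report start date sum to $5, the running total column starts from $5 and item
-- numbering starts at (number of preceding postings + 1); with --average the starting
-- value is those postings' average amount instead. Without --historical, the running
-- total starts from zero and numbering starts at 1.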
historical = balanceaccum_ == Historical startbal | average_ = if historical then precedingavg else nullmixedamt | otherwise = if historical then precedingsum else nullmixedamt where precedingsum = sumPostings precedingps precedingavg = divideMixedAmount (fromIntegral $ length precedingps) precedingsum runningcalc = registerRunningCalculationFn ropts startnum = if historical then length precedingps + 1 else 1 -- | Based on the given report options, return a function that does the appropriate -- running calculation for the register report, ie a running average or running total. -- This function will take the item number, previous average/total, and new posting amount, -- and return the new average/total. registerRunningCalculationFn :: ReportOpts -> (Int -> MixedAmount -> MixedAmount -> MixedAmount) registerRunningCalculationFn ropts | average_ ropts = \i avg amt -> avg `maPlus` divideMixedAmount (fromIntegral i) (amt `maMinus` avg) | otherwise = \_ bal amt -> bal `maPlus` amt -- | Find postings matching a given query, within a given date span, -- and also any similarly-matched postings before that date span. -- Date restrictions and depth restrictions in the query are ignored. -- A helper for the postings report. matchedPostingsBeforeAndDuring :: ReportSpec -> Journal -> DateSpan -> ([Posting],[Posting]) matchedPostingsBeforeAndDuring rspec@ReportSpec{_rsReportOpts=ropts,_rsQuery=q} j reportspan = dbg5 "beforeps, duringps" $ span (beforestartq `matchesPosting`) beforeandduringps where beforestartq = dbg3 "beforestartq" $ dateqtype $ DateSpan Nothing (Exact <$> spanStart reportspan) beforeandduringps = sortOn (postingDateOrDate2 (whichDate ropts)) -- sort postings by date or date2 . (if invert_ ropts then map negatePostingAmount else id) -- with --invert, invert amounts . journalPostings -- With most calls we will not require transaction prices past this point, and can get a big -- speed improvement by stripping them early. In some cases, such as in hledger-ui, we still -- want to keep prices around, so we can toggle between cost and no cost quickly. We can use -- the show_costs_ flag to be efficient when we can, and detailed when we have to. . (if show_costs_ ropts then id else journalMapPostingAmounts mixedAmountStripPrices) $ journalValueAndFilterPostings rspec{_rsQuery=beforeandduringq} j -- filter postings by the query, with no start date or depth limit beforeandduringq = dbg4 "beforeandduringq" $ And [depthless $ dateless q, beforeendq] where depthless = filterQuery (not . queryIsDepth) dateless = filterQuery (not . queryIsDateOrDate2) beforeendq = dateqtype $ DateSpan Nothing (Exact <$> spanEnd reportspan) dateqtype = if queryIsDate2 dateq || (queryIsDate dateq && date2_ ropts) then Date2 else Date where dateq = dbg4 "dateq" $ filterQuery queryIsDateOrDate2 $ dbg4 "q" q -- XXX confused by multiple date:/date2: ? -- | Generate postings report line items from a list of postings or (with -- non-Nothing periods attached) summary postings. 
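-- For example (a sketch, with a plain running total and a zero starting balance):
-- postings of $1, $-1 and $2 yield report items whose running total column shows
-- $1, $0 and $2; with --average that column shows the running average of the posting
-- amounts instead.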
postingsReportItems :: [(Posting,Maybe Period)] -> (Posting,Maybe Period) -> WhichDate -> Maybe Int -> MixedAmount -> (Int -> MixedAmount -> MixedAmount -> MixedAmount) -> Int -> [PostingsReportItem] postingsReportItems [] _ _ _ _ _ _ = [] postingsReportItems ((p,mperiod):ps) (pprev,mperiodprev) wd d b runningcalcfn itemnum = i:(postingsReportItems ps (p,mperiod) wd d b' runningcalcfn (itemnum+1)) where i = mkpostingsReportItem showdate showdesc wd mperiod p' b' (showdate, showdesc) | isJust mperiod = (mperiod /= mperiodprev, False) | otherwise = (isfirstintxn || isdifferentdate, isfirstintxn) isfirstintxn = ptransaction p /= ptransaction pprev isdifferentdate = case wd of PrimaryDate -> postingDate p /= postingDate pprev SecondaryDate -> postingDate2 p /= postingDate2 pprev p' = p{paccount= clipOrEllipsifyAccountName d $ paccount p} b' = runningcalcfn itemnum b $ pamount p -- | Generate one postings report line item, containing the posting, -- the current running balance, and optionally the posting date and/or -- the transaction description. mkpostingsReportItem :: Bool -> Bool -> WhichDate -> Maybe Period -> Posting -> MixedAmount -> PostingsReportItem mkpostingsReportItem showdate showdesc wd mperiod p b = (if showdate then Just $ postingDateOrDate2 wd p else Nothing ,mperiod ,if showdesc then tdescription <$> ptransaction p else Nothing ,p ,b ) -- | Convert a list of postings into summary postings, one per interval, -- aggregated to the specified depth if any. -- Each summary posting will have a non-Nothing interval end date. summarisePostingsByInterval :: WhichDate -> Maybe Int -> Bool -> [DateSpan] -> [Posting] -> [SummaryPosting] summarisePostingsByInterval wd mdepth showempty colspans = concatMap (\(s,ps) -> summarisePostingsInDateSpan s wd mdepth showempty ps) -- Group postings into their columns. We try to be efficient, since -- there can possibly be a very large number of intervals (cf #1683) . groupByDateSpan showempty (postingDateOrDate2 wd) colspans -- | Given a date span (representing a report interval) and a list of -- postings within it, aggregate the postings into one summary posting per -- account. Each summary posting will have a non-Nothing interval end date. -- -- When a depth argument is present, postings to accounts of greater -- depth are also aggregated where possible. If the depth is 0, all -- postings in the span are aggregated into a single posting with -- account name "...". -- -- The showempty flag includes spans with no postings and also postings -- with 0 amount. -- summarisePostingsInDateSpan :: DateSpan -> WhichDate -> Maybe Int -> Bool -> [Posting] -> [SummaryPosting] summarisePostingsInDateSpan spn@(DateSpan b e) wd mdepth showempty ps | null ps && (isNothing b || isNothing e) = [] | null ps && showempty = [(summaryp, dateSpanAsPeriod spn)] | otherwise = summarypes where postingdate = if wd == PrimaryDate then postingDate else postingDate2 b' = maybe (maybe nulldate postingdate $ headMay ps) fromEFDay b summaryp = nullposting{pdate=Just b'} clippedanames = nub $ map (clipAccountName mdepth) anames summaryps | mdepth == Just 0 = [summaryp{paccount="...",pamount=sumPostings ps}] | otherwise = [summaryp{paccount=a,pamount=balance a} | a <- clippedanames] summarypes = map (, dateSpanAsPeriod spn) $ (if showempty then id else filter (not . mixedAmountLooksZero . 
pamount)) summaryps anames = nubSort $ map paccount ps -- aggregate balances by account, like ledgerFromJournal, then do depth-clipping accts = accountsFromPostings ps balance a = maybe nullmixedamt bal $ lookupAccount a accts where bal = if isclipped a then aibalance else aebalance isclipped a' = maybe False (accountNameLevel a' >=) mdepth negatePostingAmount :: Posting -> Posting negatePostingAmount = postingTransformAmount negate -- tests tests_PostingsReport = testGroup "PostingsReport" [ testCase "postingsReport" $ do let (query, journal) `gives` n = (length $ postingsReport defreportspec{_rsQuery=query} journal) @?= n -- with the query specified explicitly (Any, nulljournal) `gives` 0 (Any, samplejournal) `gives` 13 -- register --depth just clips account names (Depth 2, samplejournal) `gives` 13 (And [Depth 1, StatusQ Cleared, Acct (toRegex' "expenses")], samplejournal) `gives` 2 (And [And [Depth 1, StatusQ Cleared], Acct (toRegex' "expenses")], samplejournal) `gives` 2 -- with query and/or command-line options (length $ postingsReport defreportspec samplejournal) @?= 13 (length $ postingsReport defreportspec{_rsReportOpts=defreportopts{interval_=Months 1}} samplejournal) @?= 11 (length $ postingsReport defreportspec{_rsReportOpts=defreportopts{interval_=Months 1, empty_=True}} samplejournal) @?= 20 (length $ postingsReport defreportspec{_rsQuery=Acct $ toRegex' "assets:bank:checking"} samplejournal) @?= 5 -- (defreportopts, And [Acct "a a", Acct "'b"], samplejournal2) `gives` 0 -- [(Just (fromGregorian 2008 01 01,"income"),assets:bank:checking $1,$1) -- ,(Nothing,income:salary $-1,0) -- ,(Just (2008-06-01,"gift"),assets:bank:checking $1,$1) -- ,(Nothing,income:gifts $-1,0) -- ,(Just (2008-06-02,"save"),assets:bank:saving $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) -- ,(Just (2008-06-03,"eat & shop"),expenses:food $1,$1) -- ,(Nothing,expenses:supplies $1,$2) -- ,(Nothing,assets:cash $-2,0) -- ,(Just (2008-12-31,"pay off"),liabilities:debts $1,$1) -- ,(Nothing,assets:bank:checking $-1,0) {- let opts = defreportopts (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with cleared option" ~: do let opts = defreportopts{cleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report with uncleared option" ~: do let opts = defreportopts{uncleared_=True} j <- readJournal' sample_journal_str (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank:checking $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank:checking $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank:saving $1 $1" ," assets:bank:checking $-1 0" ] ,"postings report sorts by date" ~: do j <- readJournal' $ unlines ["2008/02/02 a" ," b 1" ," c" ,"" ,"2008/01/01 d" ," e 1" ," f" ] let opts = defreportopts registerdates 
(postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/02/02"] ,"postings report with account pattern" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cash"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with account pattern, case insensitive" ~: do j <- samplejournal let opts = defreportopts{patterns_=["cAsH"]} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/06/03 eat & shop assets:cash $-2 $-2" ] ,"postings report with display expression" ~: do j <- samplejournal let gives displayexpr = (registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is`) where opts = defreportopts "d<[2008/6/2]" `gives` ["2008/01/01","2008/06/01"] "d<=[2008/6/2]" `gives` ["2008/01/01","2008/06/01","2008/06/02"] "d=[2008/6/2]" `gives` ["2008/06/02"] "d>=[2008/6/2]" `gives` ["2008/06/02","2008/06/03","2008/12/31"] "d>[2008/6/2]" `gives` ["2008/06/03","2008/12/31"] ,"postings report with period expression" ~: do j <- samplejournal let periodexpr `gives` dates = do j' <- samplejournal registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j') `is` dates where opts = defreportopts{period_=Just $ parsePeriodExpr' date1 periodexpr} "" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2008" `gives` ["2008/01/01","2008/06/01","2008/06/02","2008/06/03","2008/12/31"] "2007" `gives` [] "june" `gives` ["2008/06/01","2008/06/02","2008/06/03"] "monthly" `gives` ["2008/01/01","2008/06/01","2008/12/01"] "quarterly" `gives` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=Just $ parsePeriodExpr' date1 "yearly"} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 - 2008/12/31 assets:bank:saving $1 $1" ," assets:cash $-2 $-1" ," expenses:food $1 0" ," expenses:supplies $1 $1" ," income:gifts $-1 0" ," income:salary $-1 $-1" ," liabilities:debts $1 0" ] let opts = defreportopts{period_=Just $ parsePeriodExpr' date1 "quarterly"} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/10/01"] let opts = defreportopts{period_=Just $ parsePeriodExpr' date1 "quarterly",empty_=True} registerdates (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` ["2008/01/01","2008/04/01","2008/07/01","2008/10/01"] ] , "postings report with depth arg" ~: do j <- samplejournal let opts = defreportopts{depth_=Just 2} (postingsReportAsText opts $ postingsReport opts (queryFromOpts date1 opts) j) `is` unlines ["2008/01/01 income assets:bank $1 $1" ," income:salary $-1 0" ,"2008/06/01 gift assets:bank $1 $1" ," income:gifts $-1 0" ,"2008/06/02 save assets:bank $1 $1" ," assets:bank $-1 0" ,"2008/06/03 eat & shop expenses:food $1 $1" ," expenses:supplies $1 $2" ," assets:cash $-2 0" ,"2008/12/31 pay off liabilities:debts $1 $1" ," assets:bank $-1 0" ] -} ,testCase "summarisePostingsByInterval" $ summarisePostingsByInterval PrimaryDate Nothing False [DateSpan Nothing Nothing] [] @?= [] -- ,tests_summarisePostingsInDateSpan = [ -- "summarisePostingsInDateSpan" ~: do -- let gives (b,e,depth,showempty,ps) = -- (summarisePostingsInDateSpan (DateSpan b e) depth showempty ps `is`) -- let ps = -- [ -- 
nullposting{lpdescription="desc",lpaccount="expenses:food:groceries",lpamount=mixedAmount (usd 1)} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=mixedAmount (usd 2)} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food", lpamount=mixedAmount (usd 4)} -- ,nullposting{lpdescription="desc",lpaccount="expenses:food:dining", lpamount=mixedAmount (usd 8)} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,[]) `gives` -- [] -- ("2008/01/01","2009/01/01",0,9999,True,[]) `gives` -- [ -- nullposting{lpdate=fromGregorian 2008 01 01,lpdescription="- 2008/12/31"} -- ] -- ("2008/01/01","2009/01/01",0,9999,False,ts) `gives` -- [ -- nullposting{lpdate=fromGregorian 2008 01 01,lpdescription="- 2008/12/31",lpaccount="expenses:food", lpamount=mixedAmount (usd 4)} -- ,nullposting{lpdate=fromGregorian 2008 01 01,lpdescription="- 2008/12/31",lpaccount="expenses:food:dining", lpamount=mixedAmount (usd 10)} -- ,nullposting{lpdate=fromGregorian 2008 01 01,lpdescription="- 2008/12/31",lpaccount="expenses:food:groceries",lpamount=mixedAmount (usd 1)} -- ] -- ("2008/01/01","2009/01/01",0,2,False,ts) `gives` -- [ -- nullposting{lpdate=fromGregorian 2008 01 01,lpdescription="- 2008/12/31",lpaccount="expenses:food",lpamount=mixedAmount (usd 15)} -- ] -- ("2008/01/01","2009/01/01",0,1,False,ts) `gives` -- [ -- nullposting{lpdate=fromGregorian 2008 01 01,lpdescription="- 2008/12/31",lpaccount="expenses",lpamount=mixedAmount (usd 15)} -- ] -- ("2008/01/01","2009/01/01",0,0,False,ts) `gives` -- [ -- nullposting{lpdate=fromGregorian 2008 01 01,lpdescription="- 2008/12/31",lpaccount="",lpamount=mixedAmount (usd 15)} -- ] ] hledger-lib-1.30/Hledger/Utils.hs0000644000000000000000000002205514434445206015021 0ustar0000000000000000{-| Utilities used throughout hledger, or needed low in the module hierarchy. These are the bottom of hledger's module graph. -} module Hledger.Utils ( -- * Functions applyN, mapM', sequence', curry2, uncurry2, curry3, uncurry3, curry4, uncurry4, -- * Lists maximum', maximumStrict, minimumStrict, splitAtElement, sumStrict, -- * Trees treeLeaves, -- * Tuples first3, second3, third3, first4, second4, third4, fourth4, first5, second5, third5, fourth5, fifth5, first6, second6, third6, fourth6, fifth6, sixth6, -- * Misc multicol, numDigitsInt, makeHledgerClassyLenses, -- * Other module Hledger.Utils.Debug, module Hledger.Utils.Parse, module Hledger.Utils.IO, module Hledger.Utils.Regex, module Hledger.Utils.String, module Hledger.Utils.Text, -- * Tests tests_Utils, module Hledger.Utils.Test, ) where import Data.Char (toLower) import Data.List (intersperse) import Data.List.Extra (chunksOf, foldl', foldl1', uncons, unsnoc) import qualified Data.Set as Set import qualified Data.Text as T (pack, unpack) import Data.Tree (foldTree, Tree (Node, subForest)) import Language.Haskell.TH (DecsQ, Name, mkName, nameBase) import Lens.Micro ((&), (.~)) import Lens.Micro.TH (DefName(TopName), lensClass, lensField, makeLensesWith, classyRules) import Hledger.Utils.Debug import Hledger.Utils.Parse import Hledger.Utils.IO import Hledger.Utils.Regex import Hledger.Utils.String import Hledger.Utils.Text import Hledger.Utils.Test -- Functions -- | Apply a function the specified number of times, -- which should be > 0 (otherwise does nothing). -- Possibly uses O(n) stack ? applyN :: Int -> (a -> a) -> a -> a applyN n f | n < 1 = id | otherwise = (!! n) . iterate f -- from protolude, compare -- applyN :: Int -> (a -> a) -> a -> a -- applyN n f = X.foldr (.) 
identity (X.replicate n f) -- | Like mapM but uses sequence'. {-# INLINABLE mapM' #-} mapM' :: Monad f => (a -> f b) -> [a] -> f [b] mapM' f = sequence' . map f -- | This is a version of sequence based on difference lists. It is -- slightly faster but we mostly use it because it uses the heap -- instead of the stack. This has the advantage that Neil Mitchell’s -- trick of limiting the stack size to discover space leaks doesn’t -- show this as a false positive. {-# INLINABLE sequence' #-} sequence' :: Monad f => [f a] -> f [a] sequence' ms = do h <- go id ms return (h []) where go h [] = return h go h (m:ms') = do x <- m go (h . (x :)) ms' curry2 :: ((a, b) -> c) -> a -> b -> c curry2 f x y = f (x, y) uncurry2 :: (a -> b -> c) -> (a, b) -> c uncurry2 f (x, y) = f x y curry3 :: ((a, b, c) -> d) -> a -> b -> c -> d curry3 f x y z = f (x, y, z) uncurry3 :: (a -> b -> c -> d) -> (a, b, c) -> d uncurry3 f (x, y, z) = f x y z curry4 :: ((a, b, c, d) -> e) -> a -> b -> c -> d -> e curry4 f w x y z = f (w, x, y, z) uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e uncurry4 f (w, x, y, z) = f w x y z -- Lists -- | Total version of maximum, for integral types, giving 0 for an empty list. maximum' :: Integral a => [a] -> a maximum' [] = 0 maximum' xs = maximumStrict xs -- | Strict version of maximum that doesn’t leak space {-# INLINABLE maximumStrict #-} maximumStrict :: Ord a => [a] -> a maximumStrict = foldl1' max -- | Strict version of minimum that doesn’t leak space {-# INLINABLE minimumStrict #-} minimumStrict :: Ord a => [a] -> a minimumStrict = foldl1' min splitAtElement :: Eq a => a -> [a] -> [[a]] splitAtElement x l = case l of [] -> [] e:es | e==x -> split es es -> split es where split es = let (first,rest) = break (x==) es in first : splitAtElement x rest -- | Strict version of sum that doesn’t leak space {-# INLINABLE sumStrict #-} sumStrict :: Num a => [a] -> a sumStrict = foldl' (+) 0 -- Trees -- | Get the leaves of this tree as a list. -- The topmost node ("root" in hledger account trees) is not counted as a leaf. treeLeaves :: Tree a -> [a] treeLeaves Node{subForest=[]} = [] treeLeaves t = foldTree (\a bs -> (if null bs then (a:) else id) $ concat bs) t -- Tuples first3 (x,_,_) = x second3 (_,x,_) = x third3 (_,_,x) = x first4 (x,_,_,_) = x second4 (_,x,_,_) = x third4 (_,_,x,_) = x fourth4 (_,_,_,x) = x first5 (x,_,_,_,_) = x second5 (_,x,_,_,_) = x third5 (_,_,x,_,_) = x fourth5 (_,_,_,x,_) = x fifth5 (_,_,_,_,x) = x first6 (x,_,_,_,_,_) = x second6 (_,x,_,_,_,_) = x third6 (_,_,x,_,_,_) = x fourth6 (_,_,_,x,_,_) = x fifth6 (_,_,_,_,x,_) = x sixth6 (_,_,_,_,_,x) = x -- Misc -- | Convert a list of strings to a multi-line multi-column list -- fitting within the given width. Not wide character aware. multicol :: Int -> [String] -> String multicol _ [] = [] multicol width strs = let maxwidth = maximum' $ map length strs numcols = min (length strs) (width `div` (maxwidth+2)) itemspercol = length strs `div` numcols colitems = chunksOf itemspercol strs cols = map unlines colitems sep = " " in T.unpack $ textConcatBottomPadded $ map T.pack $ intersperse sep cols -- | Find the number of digits of an 'Int'. 
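-- For example: numDigitsInt 0 is 1, numDigitsInt 12345 is 5, and numDigitsInt (-99) is 2.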
{-# INLINE numDigitsInt #-} numDigitsInt :: Integral a => Int -> a numDigitsInt n | n == minBound = 19 -- negate minBound is out of the range of Int | n < 0 = go (negate n) | otherwise = go n where go a | a < 10 = 1 | a < 100 = 2 | a < 1000 = 3 | a < 10000 = 4 | a >= 10000000000000000 = 16 + go (a `quot` 10000000000000000) | a >= 100000000 = 8 + go (a `quot` 100000000) | otherwise = 4 + go (a `quot` 10000) -- | Make classy lenses for Hledger options fields. -- This is intended to be used with BalancingOpts, InputOpt, ReportOpts, -- ReportSpec, and CliOpts. -- When run on X, it will create a typeclass named HasX (except for ReportOpts, -- which will be named HasReportOptsNoUpdate) containing all the lenses for that type. -- If the field name starts with an underscore, the lens name will be created -- by stripping the underscore from the front of the name. If the field name ends with -- an underscore, the lens name will be mostly created by stripping the underscore, -- but a few names for which this would create too many conflicts instead have a -- second underscore appended. -- ReportOpts fields for which updating them requires updating the query in -- ReportSpec are instead named by dropping the trailing underscore and -- appending NoUpdate to the name, e.g. querystring_ -> querystringNoUpdate. -- -- There are a few reasons for the complicated rules. -- - We have some legacy field names ending in an underscore (e.g. value_) -- which we want to temporarily accommodate, before eventually switching to -- a more modern style (e.g. _rsReportOpts) -- - Certain fields in ReportOpts need to update the enclosing ReportSpec when -- they are updated, and it is a common programming error to forget to do -- this. We append NoUpdate to those lenses which will not update the -- enclosing field, and reserve the shorter name for manually defined lenses -- (or at least something lens-like) which will update the ReportSpec. -- cf. the lengthy discussion here and in surrounding comments: -- https://github.com/simonmichael/hledger/pull/1545#issuecomment-881974554 makeHledgerClassyLenses :: Name -> DecsQ makeHledgerClassyLenses x = flip makeLensesWith x $ classyRules & lensField .~ (\_ _ n -> fieldName $ nameBase n) & lensClass .~ (className . nameBase) where fieldName n | Just ('_', name) <- uncons n = [TopName (mkName name)] | Just (name, '_') <- unsnoc n, name `Set.member` queryFields = [TopName (mkName $ name ++ "NoUpdate")] | Just (name, '_') <- unsnoc n, name `Set.member` commonFields = [TopName (mkName $ name ++ "__")] | Just (name, '_') <- unsnoc n = [TopName (mkName name)] | otherwise = [] -- Fields which would cause too many conflicts if we exposed lenses with these names. commonFields = Set.fromList [ "empty", "drop", "color", "transpose" -- ReportOpts , "anon", "new", "auto" -- InputOpts , "rawopts", "file", "debug", "width" -- CliOpts ] -- When updating some fields of ReportOpts within a ReportSpec, we need to -- update the rsQuery term as well. To do this we implement a special -- HasReportOpts class with some special behaviour. We therefore give the -- basic lenses a special NoUpdate name to avoid conflicts. className "ReportOpts" = Just (mkName "HasReportOptsNoUpdate", mkName "reportOptsNoUpdate") className (x':xs) = Just (mkName ("Has" ++ x':xs), mkName (toLower x' : xs)) className [] = Nothing -- Fields of ReportOpts which need to update the Query when they are updated.
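-- For example, under these rules period_ gets the basic lens periodNoUpdate (it must
-- also update the query), drop_ gets drop__ (one of the names which would cause too
-- many conflicts), and value_ simply gets value.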
queryFields = Set.fromList ["period", "statuses", "depth", "date2", "real", "querystring"] tests_Utils = testGroup "Utils" [ tests_Text ] hledger-lib-1.30/Hledger/Utils/Debug.hs0000644000000000000000000003445614434445206016057 0ustar0000000000000000{- | Here are fancier versions of Debug.Trace, with these features: - unsafePerformIO-based for easy usage in pure code, IO code, and program startup code - reasonably short and memorable function names - pretty-printing haskell values, with or without colour, using pretty-simple - enabling/disabling debug output with --debug - multiple debug verbosity levels, from 1 to 9 - sending debug output to stderr or to a log file - enabling logging based on program name The basic "trace" functions print to stderr. This debug output will be interleaved with the program's normal output, which can be useful for understanding when code executes. The "Log" functions log to a file instead. The need for these is arguable, since a technically savvy user can redirect stderr output to a log file, eg: @CMD 2>debug.log@. But here is how they currently work: The "traceLog" functions log to the program's debug log file, which is @PROGNAME.log@ in the current directory, where PROGNAME is the program name returned by @getProgName@. When using this logging feature you should call @withProgName@ explicitly at the start of your program to ensure a stable program name, otherwise it can change to "" eg when running in GHCI. Eg: @main = withProgName "MYPROG" $ do ...@. The "OrLog" functions can either print to stderr or log to a file. - By default, they print to stderr. - If the program name has been set (with @withProgName) to something ending with ".log", they log to that file instead. This can be useful for programs which should never print to stderr, eg TUI programs like hledger-ui. The "At" functions produce output only when the program was run with a sufficiently high debug level, as set by a @--debug[=N]@ command line option. N ranges from 1 (least debug output) to 9 (most debug output), @--debug@ with no argument means 1. The "dbgN*" functions are intended to be the most convenient API, to be embedded at points of interest in your code. They combine the conditional output of "At", the conditional logging of "OrLog", pretty printing, and short searchable function names. Parsing the command line, detecting program name, and file logging is done with unsafePerformIO. If you are working in GHCI, changing the debug level requires editing and reloading this file (sometimes it's more convenient to add a dbg0 temporarily). In hledger, debug levels are used as follows: @ Debug level: What to show: ------------ --------------------------------------------------------- 0 normal command output only (no warnings, eg) 1 useful warnings, most common troubleshooting info, eg valuation 2 common troubleshooting info, more detail 3 report options selection 4 report generation 5 report generation, more detail 6 input file reading 7 input file reading, more detail 8 command line parsing 9 any other rarely needed / more in-depth info @ -} -- Disabled until 0.1.2.0 is released with windows support -- This module also exports Debug.Trace and the breakpoint package's Debug.Breakpoint. 
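-- A small usage sketch (xs and opts below are just placeholder values):
--
-- @
-- let total = dbg4 "total" (sum xs)   -- pretty-trace (or log) at --debug=4 and up
-- dbg1IO "options" opts               -- the IO variant, for use in do blocks
-- @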
-- more: -- http://hackage.haskell.org/packages/archive/TraceUtils/0.1.0.2/doc/html/Debug-TraceUtils.html -- http://hackage.haskell.org/packages/archive/trace-call/0.1/doc/html/Debug-TraceCall.html -- http://hackage.haskell.org/packages/archive/htrace/0.1/doc/html/Debug-HTrace.html -- http://hackage.haskell.org/packages/archive/traced/2009.7.20/doc/html/Debug-Traced.html -- https://hackage.haskell.org/package/debug module Hledger.Utils.Debug ( debugLevel -- * Tracing to stderr ,traceWith ,traceAt ,traceAtWith ,ptrace ,ptraceAt ,ptraceAtIO -- * Logging to PROGNAME.log ,traceLog ,traceLogAt ,traceLogIO ,traceLogAtIO ,traceLogWith ,traceLogAtWith ,ptraceLogAt ,ptraceLogAtIO -- * Tracing or logging based on shouldLog ,traceOrLog ,traceOrLogAt ,ptraceOrLogAt ,ptraceOrLogAtIO ,traceOrLogAtWith -- * Pretty tracing/logging in pure code ,dbg0 ,dbg1 ,dbg2 ,dbg3 ,dbg4 ,dbg5 ,dbg6 ,dbg7 ,dbg8 ,dbg9 ,dbgExit -- * Pretty tracing/logging in IO ,dbg0IO ,dbg1IO ,dbg2IO ,dbg3IO ,dbg4IO ,dbg5IO ,dbg6IO ,dbg7IO ,dbg8IO ,dbg9IO -- * Tracing/logging with a show function ,dbg0With ,dbg1With ,dbg2With ,dbg3With ,dbg4With ,dbg5With ,dbg6With ,dbg7With ,dbg8With ,dbg9With -- * Re-exports -- ,module Debug.Breakpoint ,module Debug.Trace ) where import Control.DeepSeq (force) import Control.Exception (evaluate) import Control.Monad.IO.Class (MonadIO, liftIO) import Data.List hiding (uncons) -- import Debug.Breakpoint import Debug.Trace (trace, traceIO, traceShowId) import Safe (readDef) import System.Environment (getProgName) import System.Exit (exitFailure) import System.IO.Unsafe (unsafePerformIO) import Hledger.Utils.IO (progArgs, pshow, pshow') -- | The program name as returned by @getProgName@. -- It's best to set this explicitly at program startup with @withProgName@, -- otherwise when running in GHCI (eg) it will change to "". -- Setting it with a ".log" suffix causes some functions below -- to log instead of trace. {-# NOINLINE modifiedProgName #-} modifiedProgName :: String modifiedProgName = unsafePerformIO getProgName -- | The progam name, with any ".log" suffix removed. progName :: String progName = if ".log" `isSuffixOf` modifiedProgName then reverse $ drop 4 $ reverse modifiedProgName else modifiedProgName -- | The programs debug output verbosity. The default is 0 meaning no debug output. -- The @--debug@ command line flag sets it to 1, or @--debug=N@ sets it to -- a higher value (the = is required). Uses unsafePerformIO. -- When running in GHCI, changing this requires reloading this module. debugLevel :: Int debugLevel = case dropWhile (/="--debug") progArgs of ["--debug"] -> 1 "--debug":n:_ -> readDef 1 n _ -> case take 1 $ filter ("--debug" `isPrefixOf`) progArgs of ['-':'-':'d':'e':'b':'u':'g':'=':v] -> readDef 1 v _ -> 0 -- | Trace a value with the given show function before returning it. traceWith :: (a -> String) -> a -> a traceWith f a = trace (f a) a -- | Pretty-trace a showable value before returning it. -- Like Debug.Trace.traceShowId, but pretty-printing and easier to type. ptrace :: Show a => a -> a ptrace = traceWith pshow -- | Trace (print to stderr) a string if the global debug level is at -- or above the specified level. At level 0, always prints. Otherwise, -- uses unsafePerformIO. traceAt :: Int -> String -> a -> a traceAt level | level > 0 && debugLevel < level = const id | otherwise = trace -- | Trace (print to stderr) a showable value using a custom show function, -- if the global debug level is at or above the specified level. -- At level 0, always prints. 
Otherwise, uses unsafePerformIO. traceAtWith :: Int -> (a -> String) -> a -> a traceAtWith level f a = traceAt level (f a) a -- | Pretty-print a label and a showable value to the console -- if the global debug level is at or above the specified level. -- At level 0, always prints. Otherwise, uses unsafePerformIO. ptraceAt :: Show a => Int -> String -> a -> a ptraceAt level | level > 0 && debugLevel < level = const id | otherwise = \lbl a -> trace (labelledPretty True lbl a) a -- Pretty-print a showable value with a label, with or without allowing ANSI color. labelledPretty :: Show a => Bool -> String -> a -> String labelledPretty allowcolour lbl a = lbl ++ ":" ++ nlorspace ++ intercalate "\n" ls' where ls = lines $ (if allowcolour then pshow else pshow') a nlorspace | length ls > 1 = "\n" | otherwise = replicate (max 1 $ 11 - length lbl) ' ' ls' | length ls > 1 = map (' ':) ls | otherwise = ls -- | Like ptraceAt, but convenient to insert in an IO monad and -- enforces monadic sequencing. ptraceAtIO :: (MonadIO m, Show a) => Int -> String -> a -> m () ptraceAtIO level label a = if level > 0 && debugLevel < level then return () else liftIO $ traceIO (labelledPretty True label a) -- | Should the "trace or log" functions output to a file instead of stderr ? -- True if the program name ends with ".log". shouldLog :: Bool shouldLog = ".log" `isSuffixOf` modifiedProgName -- | The debug log file: PROGNAME.log in the current directory. -- See modifiedProgName. debugLogFile :: FilePath debugLogFile = progName ++ ".log" -- -- | The debug log file: debug.log in the current directory. -- debugLogFile :: FilePath -- debugLogFile = "debug.log" -- | Log a string to the debug log before returning the second argument. -- Uses unsafePerformIO. traceLog :: String -> a -> a traceLog s x = unsafePerformIO $ do evaluate (force s) -- to complete any previous logging before we attempt more appendFile debugLogFile (s ++ "\n") return x -- | Log a string to the debug log before returning the second argument, -- if the global debug level is at or above the specified level. -- At level 0, always logs. Otherwise, uses unsafePerformIO. traceLogAt :: Int -> String -> a -> a traceLogAt level str | level > 0 && debugLevel < level = id | otherwise = traceLog str -- | Like traceLog but sequences properly in IO. traceLogIO :: MonadIO m => String -> m () traceLogIO s = do liftIO $ evaluate (force s) -- to complete any previous logging before we attempt more liftIO $ appendFile debugLogFile (s ++ "\n") -- | Like traceLogAt, but convenient to use in IO. traceLogAtIO :: MonadIO m => Int -> String -> m () traceLogAtIO level str | level > 0 && debugLevel < level = return () | otherwise = traceLogIO str -- | Log a value to the debug log with the given show function before returning it. traceLogWith :: (a -> String) -> a -> a traceLogWith f a = traceLog (f a) a -- | Log a string to the debug log before returning the second argument, -- if the global debug level is at or above the specified level. -- At level 0, always logs. Otherwise, uses unsafePerformIO. traceLogAtWith :: Int -> (a -> String) -> a -> a traceLogAtWith level f a = traceLogAt level (f a) a -- | Pretty-log a label and showable value to the debug log, -- if the global debug level is at or above the specified level. -- At level 0, always prints. Otherwise, uses unsafePerformIO. 
ptraceLogAt :: Show a => Int -> String -> a -> a ptraceLogAt level | level > 0 && debugLevel < level = const id | otherwise = \lbl a -> traceLog (labelledPretty False lbl a) a -- | Like ptraceAt, but convenient to insert in an IO monad and -- enforces monadic sequencing. ptraceLogAtIO :: (MonadIO m, Show a) => Int -> String -> a -> m () ptraceLogAtIO level label a = if level > 0 && debugLevel < level then return () else traceLogIO (labelledPretty False label a) -- Trace or log a string depending on shouldLog, -- before returning the second argument. traceOrLog :: String -> a -> a traceOrLog = if shouldLog then trace else traceLog -- Trace or log a string depending on shouldLog, -- when global debug level is at or above the specified level, -- before returning the second argument. traceOrLogAt :: Int -> String -> a -> a traceOrLogAt = if shouldLog then traceLogAt else traceAt -- Pretty-trace or log depending on shouldLog, when global debug level -- is at or above the specified level. ptraceOrLogAt :: Show a => Int -> String -> a -> a ptraceOrLogAt = if shouldLog then ptraceLogAt else ptraceAt -- Like ptraceOrLogAt, but convenient in IO. ptraceOrLogAtIO :: (MonadIO m, Show a) => Int -> String -> a -> m () ptraceOrLogAtIO = if shouldLog then ptraceLogAtIO else ptraceAtIO -- Trace or log, with a show function, depending on shouldLog. traceOrLogAtWith :: Int -> (a -> String) -> a -> a traceOrLogAtWith = if shouldLog then traceLogAtWith else traceAtWith -- | Pretty-trace to stderr (or log to debug log) a label and showable value, -- then return it. dbg0 :: Show a => String -> a -> a dbg0 = ptraceOrLogAt 0 -- | Pretty-trace to stderr (or log to debug log) a label and showable value -- if the --debug level is high enough, then return the value. -- Uses unsafePerformIO. dbg1 :: Show a => String -> a -> a dbg1 = ptraceOrLogAt 1 dbg2 :: Show a => String -> a -> a dbg2 = ptraceOrLogAt 2 dbg3 :: Show a => String -> a -> a dbg3 = ptraceOrLogAt 3 dbg4 :: Show a => String -> a -> a dbg4 = ptraceOrLogAt 4 dbg5 :: Show a => String -> a -> a dbg5 = ptraceOrLogAt 5 dbg6 :: Show a => String -> a -> a dbg6 = ptraceOrLogAt 6 dbg7 :: Show a => String -> a -> a dbg7 = ptraceOrLogAt 7 dbg8 :: Show a => String -> a -> a dbg8 = ptraceOrLogAt 8 dbg9 :: Show a => String -> a -> a dbg9 = ptraceOrLogAt 9 -- | Like dbg0, but also exit the program. Uses unsafePerformIO. dbgExit :: Show a => String -> a -> a dbgExit label a = unsafePerformIO $ dbg0IO label a >> exitFailure -- | Like dbgN, but convenient to use in IO. dbg0IO :: (MonadIO m, Show a) => String -> a -> m () dbg0IO = ptraceOrLogAtIO 0 dbg1IO :: (MonadIO m, Show a) => String -> a -> m () dbg1IO = ptraceOrLogAtIO 1 dbg2IO :: (MonadIO m, Show a) => String -> a -> m () dbg2IO = ptraceOrLogAtIO 2 dbg3IO :: (MonadIO m, Show a) => String -> a -> m () dbg3IO = ptraceOrLogAtIO 3 dbg4IO :: (MonadIO m, Show a) => String -> a -> m () dbg4IO = ptraceOrLogAtIO 4 dbg5IO :: (MonadIO m, Show a) => String -> a -> m () dbg5IO = ptraceOrLogAtIO 5 dbg6IO :: (MonadIO m, Show a) => String -> a -> m () dbg6IO = ptraceOrLogAtIO 6 dbg7IO :: (MonadIO m, Show a) => String -> a -> m () dbg7IO = ptraceOrLogAtIO 7 dbg8IO :: (MonadIO m, Show a) => String -> a -> m () dbg8IO = ptraceOrLogAtIO 8 dbg9IO :: (MonadIO m, Show a) => String -> a -> m () dbg9IO = ptraceOrLogAtIO 9 -- | Like dbgN, but taking a show function instead of a label. 
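-- For example (illustrative), @dbg4With (show . length) xs@ traces or logs just the
-- length of the list @xs@, rather than pretty-printing the whole value.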
dbg0With :: (a -> String) -> a -> a dbg0With = traceOrLogAtWith 0 dbg1With :: Show a => (a -> String) -> a -> a dbg1With = traceOrLogAtWith 1 dbg2With :: Show a => (a -> String) -> a -> a dbg2With = traceOrLogAtWith 2 dbg3With :: Show a => (a -> String) -> a -> a dbg3With = traceOrLogAtWith 3 dbg4With :: Show a => (a -> String) -> a -> a dbg4With = traceOrLogAtWith 4 dbg5With :: Show a => (a -> String) -> a -> a dbg5With = traceOrLogAtWith 5 dbg6With :: Show a => (a -> String) -> a -> a dbg6With = traceOrLogAtWith 6 dbg7With :: Show a => (a -> String) -> a -> a dbg7With = traceOrLogAtWith 7 dbg8With :: Show a => (a -> String) -> a -> a dbg8With = traceOrLogAtWith 8 dbg9With :: Show a => (a -> String) -> a -> a dbg9With = traceOrLogAtWith 9 hledger-lib-1.30/Hledger/Utils/IO.hs0000644000000000000000000004256614434445206015341 0ustar0000000000000000{- | Helpers for pretty-printing haskell values, reading command line arguments, working with ANSI colours, files, and time. Uses unsafePerformIO. Limitations: When running in GHCI, this module must be reloaded to see environmental changes. The colour scheme may be somewhat hard-coded. -} {-# LANGUAGE CPP, LambdaCase #-} module Hledger.Utils.IO ( -- * Pretty showing/printing pshow, pshow', pprint, pprint', -- * Viewing with pager pager, setupPager, -- * Terminal size getTerminalHeightWidth, getTerminalHeight, getTerminalWidth, -- * Command line arguments progArgs, outputFileOption, hasOutputFile, -- * ANSI color colorOption, useColorOnStdout, useColorOnStderr, -- XXX needed for using color/bgColor/colorB/bgColorB, but clashing with UIUtils: -- Color(..), -- ColorIntensity(..), color, bgColor, colorB, bgColorB, -- bold', faint', black', red', green', yellow', blue', magenta', cyan', white', brightBlack', brightRed', brightGreen', brightYellow', brightBlue', brightMagenta', brightCyan', brightWhite', rgb', terminalIsLight, terminalLightness, terminalFgColor, terminalBgColor, -- * Errors error', usageError, -- * Files embedFileRelative, expandHomePath, expandPath, expandGlob, sortByModTime, readFileOrStdinPortably, readFilePortably, readHandlePortably, -- hereFileRelative, -- * Time getCurrentLocalTime, getCurrentZonedTime, ) where import Control.Monad (when, forM) import Data.Colour.RGBSpace (RGB(RGB)) import Data.Colour.RGBSpace.HSL (lightness) import Data.FileEmbed (makeRelativeToProject, embedStringFile) import Data.List hiding (uncons) import Data.Maybe (isJust) import qualified Data.Text as T import qualified Data.Text.IO as T import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Data.Time.Clock (getCurrentTime) import Data.Time.LocalTime (LocalTime, ZonedTime, getCurrentTimeZone, utcToLocalTime, utcToZonedTime) import Data.Word (Word8, Word16) import Language.Haskell.TH.Syntax (Q, Exp) import String.ANSI import System.Console.ANSI (Color(..),ColorIntensity(..), ConsoleLayer(..), SGR(..), hSupportsANSIColor, setSGRCode, getLayerColor) import System.Console.Terminal.Size (Window (Window), size) import System.Directory (getHomeDirectory, getModificationTime) import System.Environment (getArgs, lookupEnv, setEnv) import System.FilePath (isRelative, ()) import System.IO (Handle, IOMode (..), hGetEncoding, hSetEncoding, hSetNewlineMode, openFile, stdin, stdout, stderr, universalNewlineMode, utf8_bom, hIsTerminalDevice) import System.IO.Unsafe (unsafePerformIO) #ifndef mingw32_HOST_OS import System.Pager (printOrPage) #endif import Text.Pretty.Simple (CheckColorTty(..), OutputOptions(..), 
defaultOutputOptionsDarkBg, defaultOutputOptionsNoColor, pShowOpt, pPrintOpt) import Hledger.Utils.Text (WideBuilder(WideBuilder)) import System.FilePath.Glob (glob) import Data.Functor ((<&>)) -- Pretty showing/printing with pretty-simple -- https://hackage.haskell.org/package/pretty-simple/docs/Text-Pretty-Simple.html#t:OutputOptions -- | pretty-simple options with colour enabled if allowed. prettyopts = (if useColorOnStderr then defaultOutputOptionsDarkBg else defaultOutputOptionsNoColor) { outputOptionsIndentAmount = 2 -- , outputOptionsCompact = True -- fills lines, but does not respect page width (https://github.com/cdepillabout/pretty-simple/issues/126) -- , outputOptionsPageWidth = fromMaybe 80 $ unsafePerformIO getTerminalWidth } -- | pretty-simple options with colour disabled. prettyoptsNoColor = defaultOutputOptionsNoColor { outputOptionsIndentAmount=2 } -- | Pretty show. An easier alias for pretty-simple's pShow. -- This will probably show in colour if useColorOnStderr is true. pshow :: Show a => a -> String pshow = TL.unpack . pShowOpt prettyopts -- | Monochrome version of pshow. This will never show in colour. pshow' :: Show a => a -> String pshow' = TL.unpack . pShowOpt prettyoptsNoColor -- | Pretty print a showable value. An easier alias for pretty-simple's pPrint. -- This will print in colour if useColorOnStderr is true. pprint :: Show a => a -> IO () pprint = pPrintOpt (if useColorOnStderr then CheckColorTty else NoCheckColorTty) prettyopts -- | Monochrome version of pprint. This will never print in colour. pprint' :: Show a => a -> IO () pprint' = pPrintOpt NoCheckColorTty prettyoptsNoColor -- "Avoid using pshow, pprint, dbg* in the code below to prevent infinite loops." (?) -- | Display the given text on the terminal, using the user's $PAGER if the text is taller -- than the current terminal and stdout is interactive and TERM is not "dumb" -- (except on Windows, where a pager will not be used). -- If the text contains ANSI codes, because hledger thinks the current terminal -- supports those, the pager should be configured to display those, otherwise -- users will see junk on screen (#2015). -- We call "setLessR" at hledger startup to make that less likely. pager :: String -> IO () #ifdef mingw32_HOST_OS pager = putStrLn #else printOrPage' s = do -- an extra check for Emacs users: dumbterm <- (== Just "dumb") <$> lookupEnv "TERM" if dumbterm then putStrLn s else printOrPage (T.pack s) pager = printOrPage' #endif -- | An alternative to ansi-terminal's getTerminalSize, based on -- the more robust-looking terminal-size package. -- Tries to get stdout's terminal's current height and width. getTerminalHeightWidth :: IO (Maybe (Int,Int)) getTerminalHeightWidth = fmap (fmap unwindow) size where unwindow (Window h w) = (h,w) getTerminalHeight :: IO (Maybe Int) getTerminalHeight = fmap fst <$> getTerminalHeightWidth getTerminalWidth :: IO (Maybe Int) getTerminalWidth = fmap snd <$> getTerminalHeightWidth -- | Make sure our $LESS and $MORE environment variables contain R, -- to help ensure the common pager `less` will show our ANSI output properly. -- less uses $LESS by default, and $MORE when it is invoked as `more`. -- What the original `more` program does, I'm not sure. -- If $PAGER is configured to something else, this probably will have no effect. 
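-- A hypothetical usage sketch (caller code, names invented): set up the
-- pager once at startup, then hand long output to pager, which pages it
-- roughly when it is taller than the terminal and stdout is interactive:
--   main = do
--     setupPager
--     mw <- getTerminalWidth
--     pager $ renderReport (maybe 80 id mw) report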
setupPager :: IO () setupPager = do let addR var = do mv <- lookupEnv var setEnv var $ case mv of Nothing -> "R" Just v -> ('R':v) addR "LESS" addR "MORE" -- Command line arguments -- | The command line arguments that were used at program startup. -- Uses unsafePerformIO. {-# NOINLINE progArgs #-} progArgs :: [String] progArgs = unsafePerformIO getArgs -- | Read the value of the -o/--output-file command line option provided at program startup, -- if any, using unsafePerformIO. outputFileOption :: Maybe String outputFileOption = -- keep synced with output-file flag definition in hledger:CliOptions. let args = progArgs in case dropWhile (not . ("-o" `isPrefixOf`)) args of -- -oARG ('-':'o':v@(_:_)):_ -> Just v -- -o ARG "-o":v:_ -> Just v _ -> case dropWhile (/="--output-file") args of -- --output-file ARG "--output-file":v:_ -> Just v _ -> case take 1 $ filter ("--output-file=" `isPrefixOf`) args of -- --output=file=ARG ['-':'-':'o':'u':'t':'p':'u':'t':'-':'f':'i':'l':'e':'=':v] -> Just v _ -> Nothing -- | Check whether the -o/--output-file option has been used at program startup -- with an argument other than "-", using unsafePerformIO. hasOutputFile :: Bool hasOutputFile = outputFileOption `notElem` [Nothing, Just "-"] -- XXX shouldn't we check that stdout is interactive. instead ? -- ANSI colour ifAnsi f = if useColorOnStdout then f else id -- | Versions of some of text-ansi's string colors/styles which are more careful -- to not print junk onscreen. These use our useColorOnStdout. bold' :: String -> String bold' = ifAnsi bold faint' :: String -> String faint' = ifAnsi faint black' :: String -> String black' = ifAnsi black red' :: String -> String red' = ifAnsi red green' :: String -> String green' = ifAnsi green yellow' :: String -> String yellow' = ifAnsi yellow blue' :: String -> String blue' = ifAnsi blue magenta' :: String -> String magenta' = ifAnsi magenta cyan' :: String -> String cyan' = ifAnsi cyan white' :: String -> String white' = ifAnsi white brightBlack' :: String -> String brightBlack' = ifAnsi brightBlack brightRed' :: String -> String brightRed' = ifAnsi brightRed brightGreen' :: String -> String brightGreen' = ifAnsi brightGreen brightYellow' :: String -> String brightYellow' = ifAnsi brightYellow brightBlue' :: String -> String brightBlue' = ifAnsi brightBlue brightMagenta' :: String -> String brightMagenta' = ifAnsi brightMagenta brightCyan' :: String -> String brightCyan' = ifAnsi brightCyan brightWhite' :: String -> String brightWhite' = ifAnsi brightWhite rgb' :: Word8 -> Word8 -> Word8 -> String -> String rgb' r g b = ifAnsi (rgb r g b) -- | Read the value of the --color or --colour command line option provided at program startup -- using unsafePerformIO. If this option was not provided, returns the empty string. colorOption :: String colorOption = -- similar to debugLevel -- keep synced with color/colour flag definition in hledger:CliOptions let args = progArgs in case dropWhile (/="--color") args of -- --color ARG "--color":v:_ -> v _ -> case take 1 $ filter ("--color=" `isPrefixOf`) args of -- --color=ARG ['-':'-':'c':'o':'l':'o':'r':'=':v] -> v _ -> case dropWhile (/="--colour") args of -- --colour ARG "--colour":v:_ -> v _ -> case take 1 $ filter ("--colour=" `isPrefixOf`) args of -- --colour=ARG ['-':'-':'c':'o':'l':'o':'u':'r':'=':v] -> v _ -> "" -- | Check the IO environment to see if ANSI colour codes should be used on stdout. 
-- This is done using unsafePerformIO so it can be used anywhere, eg in -- low-level debug utilities, which should be ok since we are just reading. -- The logic is: use color if -- the program was started with --color=yes|always -- or ( -- the program was not started with --color=no|never -- and a NO_COLOR environment variable is not defined -- and stdout supports ANSI color -- and -o/--output-file was not used, or its value is "-" -- ). useColorOnStdout :: Bool useColorOnStdout = not hasOutputFile && useColorOnHandle stdout -- | Like useColorOnStdout, but checks for ANSI color support on stderr, -- and is not affected by -o/--output-file. useColorOnStderr :: Bool useColorOnStderr = useColorOnHandle stderr useColorOnHandle :: Handle -> Bool useColorOnHandle h = unsafePerformIO $ do no_color <- isJust <$> lookupEnv "NO_COLOR" supports_color <- hSupportsANSIColor h let coloroption = colorOption return $ coloroption `elem` ["always","yes"] || (coloroption `notElem` ["never","no"] && not no_color && supports_color) -- | Wrap a string in ANSI codes to set and reset foreground colour. -- ColorIntensity is @Dull@ or @Vivid@ (ie normal and bold). -- Color is one of @Black@, @Red@, @Green@, @Yellow@, @Blue@, @Magenta@, @Cyan@, @White@. -- Eg: @color Dull Red "text"@. color :: ColorIntensity -> Color -> String -> String color int col s = setSGRCode [SetColor Foreground int col] ++ s ++ setSGRCode [] -- | Wrap a string in ANSI codes to set and reset background colour. bgColor :: ColorIntensity -> Color -> String -> String bgColor int col s = setSGRCode [SetColor Background int col] ++ s ++ setSGRCode [] -- | Wrap a WideBuilder in ANSI codes to set and reset foreground colour. colorB :: ColorIntensity -> Color -> WideBuilder -> WideBuilder colorB int col (WideBuilder s w) = WideBuilder (TB.fromString (setSGRCode [SetColor Foreground int col]) <> s <> TB.fromString (setSGRCode [])) w -- | Wrap a WideBuilder in ANSI codes to set and reset background colour. bgColorB :: ColorIntensity -> Color -> WideBuilder -> WideBuilder bgColorB int col (WideBuilder s w) = WideBuilder (TB.fromString (setSGRCode [SetColor Background int col]) <> s <> TB.fromString (setSGRCode [])) w -- | Detect whether the terminal currently has a light background colour, -- if possible, using unsafePerformIO. -- If the terminal is transparent, its apparent light/darkness may be different. terminalIsLight :: Maybe Bool terminalIsLight = (> 0.5) <$> terminalLightness -- | Detect the terminal's current background lightness (0..1), if possible, using unsafePerformIO. -- If the terminal is transparent, its apparent lightness may be different. terminalLightness :: Maybe Float terminalLightness = lightness <$> terminalColor Background -- | Detect the terminal's current background colour, if possible, using unsafePerformIO. terminalBgColor :: Maybe (RGB Float) terminalBgColor = terminalColor Background -- | Detect the terminal's current foreground colour, if possible, using unsafePerformIO. terminalFgColor :: Maybe (RGB Float) terminalFgColor = terminalColor Foreground -- | Detect the terminal's current foreground or background colour, if possible, using unsafePerformIO. {-# NOINLINE terminalColor #-} terminalColor :: ConsoleLayer -> Maybe (RGB Float) terminalColor = unsafePerformIO . getLayerColor' -- A version of getLayerColor that is less likely to leak escape sequences to output, -- and that returns a RGB of Floats (0..1) that is more compatible with the colour package. 
-- This does nothing in a non-interactive context (eg when piping stdout to another command), -- inside emacs (emacs shell buffers show the escape sequence for some reason), -- or in a non-colour-supporting terminal. getLayerColor' :: ConsoleLayer -> IO (Maybe (RGB Float)) getLayerColor' l = do inemacs <- not.null <$> lookupEnv "INSIDE_EMACS" interactive <- hIsTerminalDevice stdout supportscolor <- hSupportsANSIColor stdout if inemacs || not interactive || not supportscolor then return Nothing else fmap fractionalRGB <$> getLayerColor l where fractionalRGB :: (Fractional a) => RGB Word16 -> RGB a fractionalRGB (RGB r g b) = RGB (fromIntegral r / 65535) (fromIntegral g / 65535) (fromIntegral b / 65535) -- chatgpt -- Errors -- | Simpler alias for errorWithoutStackTrace error' :: String -> a error' = errorWithoutStackTrace . ("Error: " <>) -- | A version of errorWithoutStackTrace that adds a usage hint. usageError :: String -> a usageError = error' . (++ " (use -h to see usage)") -- Files -- | Expand a tilde (representing home directory) at the start of a file path. -- ~username is not supported. Can raise an error. expandHomePath :: FilePath -> IO FilePath expandHomePath = \case ('~':'/':p) -> ( p) <$> getHomeDirectory ('~':'\\':p) -> ( p) <$> getHomeDirectory ('~':_) -> ioError $ userError "~USERNAME in paths is not supported" p -> return p -- | Given a current directory, convert a possibly relative, possibly tilde-containing -- file path to an absolute one. -- ~username is not supported. Leaves "-" unchanged. Can raise an error. expandPath :: FilePath -> FilePath -> IO FilePath -- general type sig for use in reader parsers expandPath _ "-" = return "-" expandPath curdir p = (if isRelative p then (curdir ) else id) <$> expandHomePath p -- | Like expandPath, but treats the expanded path as a glob, and returns -- zero or more matched absolute file paths, alphabetically sorted. expandGlob :: FilePath -> FilePath -> IO [FilePath] expandGlob curdir p = expandPath curdir p >>= glob <&> sort -- | Given a list of existing file paths, sort them by modification time, most recent first. sortByModTime :: [FilePath] -> IO [FilePath] sortByModTime fs = do ftimes <- forM fs $ \f -> do {t <- getModificationTime f; return (t,f)} return $ map snd $ reverse $ sort ftimes -- | Read text from a file, -- converting any \r\n line endings to \n,, -- using the system locale's text encoding, -- ignoring any utf8 BOM prefix (as seen in paypal's 2018 CSV, eg) if that encoding is utf8. readFilePortably :: FilePath -> IO T.Text readFilePortably f = openFile f ReadMode >>= readHandlePortably -- | Like readFilePortably, but read from standard input if the path is "-". readFileOrStdinPortably :: String -> IO T.Text readFileOrStdinPortably f = openFileOrStdin f ReadMode >>= readHandlePortably where openFileOrStdin :: String -> IOMode -> IO Handle openFileOrStdin "-" _ = return stdin openFileOrStdin f' m = openFile f' m readHandlePortably :: Handle -> IO T.Text readHandlePortably h = do hSetNewlineMode h universalNewlineMode menc <- hGetEncoding h when (fmap show menc == Just "UTF-8") $ -- XXX no Eq instance, rely on Show hSetEncoding h utf8_bom T.hGetContents h -- | Like embedFile, but takes a path relative to the package directory. embedFileRelative :: FilePath -> Q Exp embedFileRelative f = makeRelativeToProject f >>= embedStringFile -- -- | Like hereFile, but takes a path relative to the package directory. -- -- Similar to embedFileRelative ? 
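-- A hypothetical usage sketch of the path and file helpers above (caller
-- code, names invented):
--   do d  <- getCurrentDirectory
--      f  <- expandPath d "~/2023.journal"    -- absolute path, ~ expanded
--      fs <- expandGlob d "*.journal"         -- sorted absolute matches
--      t  <- readFileOrStdinPortably f        -- or "-" to read from stdin
--      ...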
-- hereFileRelative :: FilePath -> Q Exp -- hereFileRelative f = makeRelativeToProject f >>= hereFileExp -- where -- QuasiQuoter{quoteExp=hereFileExp} = hereFile -- Time getCurrentLocalTime :: IO LocalTime getCurrentLocalTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToLocalTime tz t getCurrentZonedTime :: IO ZonedTime getCurrentZonedTime = do t <- getCurrentTime tz <- getCurrentTimeZone return $ utcToZonedTime tz t hledger-lib-1.30/Hledger/Utils/Parse.hs0000644000000000000000000001440614434445206016074 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} module Hledger.Utils.Parse ( SimpleStringParser, SimpleTextParser, TextParser, -- * SourcePos SourcePos(..), mkPos, unPos, initialPos, sourcePosPretty, sourcePosPairPretty, choice', choiceInState, surroundedBy, parsewith, runTextParser, rtp, parsewithString, parseWithState, parseWithState', fromparse, parseerror, showDateParseError, nonspace, isNewline, isNonNewlineSpace, restofline, eolof, spacenonewline, skipNonNewlineSpaces, skipNonNewlineSpaces1, skipNonNewlineSpaces', -- ** Trace the state of hledger parsers traceOrLogParse, dbgparse, -- * re-exports HledgerParseErrors, HledgerParseErrorData, customErrorBundlePretty, ) where import Control.Monad (when) import qualified Data.Text as T import Text.Megaparsec import Text.Printf import Control.Monad.State.Strict (StateT, evalStateT) import Data.Char import Data.Functor (void) import Data.Functor.Identity (Identity(..)) import Data.List import Data.Text (Text) import Text.Megaparsec.Char import Text.Megaparsec.Custom import Hledger.Utils.Debug (debugLevel, traceOrLog) -- | A parser of string to some type. type SimpleStringParser a = Parsec HledgerParseErrorData String a -- | A parser of strict text to some type. type SimpleTextParser = Parsec HledgerParseErrorData Text -- XXX an "a" argument breaks the CsvRulesParser declaration somehow -- | A parser of text that runs in some monad. type TextParser m a = ParsecT HledgerParseErrorData Text m a -- | Trace to stderr or log to debug log the provided label (if non-null) -- and current parser state (position and next input). -- See also: Hledger.Utils.Debug, megaparsec's dbg. -- Uses unsafePerformIO. traceOrLogParse :: String -> TextParser m () traceOrLogParse msg = do pos <- getSourcePos next <- (T.take peeklength) `fmap` getInput let (l,c) = (sourceLine pos, sourceColumn pos) s = printf "at line %2d col %2d: %s" (unPos l) (unPos c) (show next) :: String s' = printf ("%-"++show (peeklength+30)++"s") s ++ " " ++ msg traceOrLog s' $ return () where peeklength = 30 -- class (Stream s, MonadPlus m) => MonadParsec e s m -- dbgparse :: (MonadPlus m, MonadParsec e String m) => Int -> String -> m () -- | Trace to stderr or log to debug log the provided label (if non-null) -- and current parser state (position and next input), -- if the global debug level is at or above the specified level. -- Uses unsafePerformIO. dbgparse :: Int -> String -> TextParser m () dbgparse level msg = when (level <= debugLevel) $ traceOrLogParse msg -- | Render a pair of source positions in human-readable form, only displaying the range of lines. sourcePosPairPretty :: (SourcePos, SourcePos) -> String sourcePosPairPretty (SourcePos fp l1 _, SourcePos _ l2 c2) = fp ++ ":" ++ show (unPos l1) ++ "-" ++ show l2' where l2' = if unPos c2 == 1 then unPos l2 - 1 else unPos l2 -- might be at end of file with a final new line -- | Backtracking choice, use this when alternatives share a prefix. 
-- Consumes no input if all choices fail. choice' :: [TextParser m a] -> TextParser m a choice' = choice . map try -- | Backtracking choice, use this when alternatives share a prefix. -- Consumes no input if all choices fail. choiceInState :: [StateT s (ParsecT HledgerParseErrorData Text m) a] -> StateT s (ParsecT HledgerParseErrorData Text m) a choiceInState = choice . map try surroundedBy :: Applicative m => m openclose -> m a -> m a surroundedBy p = between p p parsewith :: Parsec e Text a -> Text -> Either (ParseErrorBundle Text e) a parsewith p = runParser p "" -- | Run a text parser in the identity monad. See also: parseWithState. runTextParser, rtp :: TextParser Identity a -> Text -> Either HledgerParseErrors a runTextParser = parsewith rtp = runTextParser parsewithString :: Parsec e String a -> String -> Either (ParseErrorBundle String e) a parsewithString p = runParser p "" -- | Run a stateful parser with some initial state on a text. -- See also: runTextParser, runJournalParser. parseWithState :: Monad m => st -> StateT st (ParsecT HledgerParseErrorData Text m) a -> Text -> m (Either HledgerParseErrors a) parseWithState ctx p = runParserT (evalStateT p ctx) "" parseWithState' :: (Stream s) => st -> StateT st (ParsecT e s Identity) a -> s -> (Either (ParseErrorBundle s e) a) parseWithState' ctx p = runParser (evalStateT p ctx) "" fromparse :: (Show t, Show (Token t), Show e) => Either (ParseErrorBundle t e) a -> a fromparse = either parseerror id parseerror :: (Show t, Show (Token t), Show e) => ParseErrorBundle t e -> a parseerror e = errorWithoutStackTrace $ showParseError e -- PARTIAL: showParseError :: (Show t, Show (Token t), Show e) => ParseErrorBundle t e -> String showParseError e = "parse error at " ++ show e showDateParseError :: (Show t, Show (Token t), Show e) => ParseErrorBundle t e -> String showDateParseError e = printf "date parse error (%s)" (intercalate ", " $ tail $ lines $ show e) isNewline :: Char -> Bool isNewline '\n' = True isNewline _ = False nonspace :: TextParser m Char nonspace = satisfy (not . isSpace) isNonNewlineSpace :: Char -> Bool isNonNewlineSpace c = not (isNewline c) && isSpace c spacenonewline :: (Stream s, Char ~ Token s) => ParsecT HledgerParseErrorData s m Char spacenonewline = satisfy isNonNewlineSpace {-# INLINABLE spacenonewline #-} restofline :: TextParser m String restofline = anySingle `manyTill` eolof -- Skip many non-newline spaces. skipNonNewlineSpaces :: (Stream s, Token s ~ Char) => ParsecT HledgerParseErrorData s m () skipNonNewlineSpaces = void $ takeWhileP Nothing isNonNewlineSpace {-# INLINABLE skipNonNewlineSpaces #-} -- Skip many non-newline spaces, failing if there are none. skipNonNewlineSpaces1 :: (Stream s, Token s ~ Char) => ParsecT HledgerParseErrorData s m () skipNonNewlineSpaces1 = void $ takeWhile1P Nothing isNonNewlineSpace {-# INLINABLE skipNonNewlineSpaces1 #-} -- Skip many non-newline spaces, returning True if any have been skipped. skipNonNewlineSpaces' :: (Stream s, Token s ~ Char) => ParsecT HledgerParseErrorData s m Bool skipNonNewlineSpaces' = True <$ skipNonNewlineSpaces1 <|> pure False {-# INLINABLE skipNonNewlineSpaces' #-} eolof :: TextParser m () eolof = void newline <|> eof hledger-lib-1.30/Hledger/Utils/Regex.hs0000644000000000000000000002660514434445206016100 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-| Easy regular expression helpers, currently based on regex-tdfa. 
These should: - be cross-platform, not requiring C libraries - support unicode - support extended regular expressions - support replacement, with backreferences etc. - support splitting - have mnemonic names - have simple monomorphic types - work with simple strings Regex strings are automatically compiled into regular expressions the first time they are seen, and these are cached. If you use a huge number of unique regular expressions this might lead to increased memory usage. Several functions have memoised variants (*Memo), which also trade space for time. Currently two APIs are provided: - The old partial one (with ' suffixes') which will call error on any problem (eg with malformed regexps). This comes from hledger's origin as a command-line tool. - The new total one which will return an error message. This is better for long-running apps like hledger-web. Current limitations: - (?i) and similar are not supported -} module Hledger.Utils.Regex ( -- * Regexp type and constructors Regexp(reString) ,toRegex ,toRegexCI ,toRegex' ,toRegexCI' -- * type aliases ,Replacement ,RegexError -- * total regex operations ,regexMatch ,regexMatchText ,regexReplace ,regexReplaceUnmemo ,regexReplaceAllBy ) where import Control.Monad (foldM) import Data.Aeson (ToJSON(..), Value(String)) import Data.Array ((!), elems, indices) import Data.Char (isDigit) import Data.List (foldl') import Data.MemoUgly (memo) import Data.Text (Text) import qualified Data.Text as T import Text.Regex.TDFA ( Regex, CompOption(..), defaultCompOpt, defaultExecOpt, makeRegexOptsM, AllMatches(getAllMatches), match, MatchText, RegexLike(..), RegexMaker(..), RegexOptions(..), RegexContext(..) ) -- | Regular expression. Extended regular expression-ish syntax ? But does not support eg (?i) syntax. data Regexp = Regexp { reString :: Text, reCompiled :: Regex } | RegexpCI { reString :: Text, reCompiled :: Regex } instance Eq Regexp where Regexp s1 _ == Regexp s2 _ = s1 == s2 RegexpCI s1 _ == RegexpCI s2 _ = s1 == s2 _ == _ = False instance Ord Regexp where Regexp s1 _ `compare` Regexp s2 _ = s1 `compare` s2 RegexpCI s1 _ `compare` RegexpCI s2 _ = s1 `compare` s2 Regexp _ _ `compare` RegexpCI _ _ = LT RegexpCI _ _ `compare` Regexp _ _ = GT instance Show Regexp where showsPrec d r = showParen (d > app_prec) $ reCons . showsPrec (app_prec+1) (T.unpack $ reString r) where app_prec = 10 reCons = case r of Regexp _ _ -> showString "Regexp " RegexpCI _ _ -> showString "RegexpCI " instance Read Regexp where readsPrec d r = readParen (d > app_prec) (\r' -> [(toRegexCI' m,t) | ("RegexCI",s) <- lex r', (m,t) <- readsPrec (app_prec+1) s]) r ++ readParen (d > app_prec) (\r' -> [(toRegex' m, t) | ("Regex",s) <- lex r', (m,t) <- readsPrec (app_prec+1) s]) r where app_prec = 10 instance ToJSON Regexp where toJSON (Regexp s _) = String $ "Regexp " <> s toJSON (RegexpCI s _) = String $ "RegexpCI " <> s instance RegexLike Regexp String where matchOnce = matchOnce . reCompiled matchAll = matchAll . reCompiled matchCount = matchCount . reCompiled matchTest = matchTest . reCompiled matchAllText = matchAllText . reCompiled matchOnceText = matchOnceText . reCompiled instance RegexContext Regexp String String where match = match . reCompiled matchM = matchM . reCompiled -- Convert a Regexp string to a compiled Regex, or return an error message. 
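-- A hypothetical usage sketch of the conversion, matching and replacement
-- helpers defined below (not part of the library API); the expected result
-- is noted in a comment and has not been verified by running it:
_regexUsageSketch :: Either RegexError (Bool, String)
_regexUsageSketch = do
  re  <- toRegexCI "gro+ceries"                            -- compile once, case-insensitively
  new <- regexReplace re "food" "expenses:groceries  10"   -- total replace, no backreferences
  pure (regexMatchText re "expenses:GROCERIES", new)
  -- expected: Right (True, "expenses:food  10")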
toRegex :: Text -> Either RegexError Regexp toRegex = memo $ \s -> mkRegexErr s (Regexp s <$> makeRegexM (T.unpack s)) -- Have to unpack here because Text instance in regex-tdfa only appears in 1.3.1 -- Like toRegex, but make a case-insensitive Regex. toRegexCI :: Text -> Either RegexError Regexp toRegexCI = memo $ \s -> mkRegexErr s (RegexpCI s <$> makeRegexOptsM defaultCompOpt{caseSensitive=False} defaultExecOpt (T.unpack s)) -- Have to unpack here because Text instance in regex-tdfa only appears in 1.3.1 -- | Make a nice error message for a regexp error. mkRegexErr :: Text -> Maybe a -> Either RegexError a mkRegexErr s = maybe (Left errmsg) Right where errmsg = T.unpack $ "This regular expression is malformed, please correct it:\n" <> s -- Convert a Regexp string to a compiled Regex, throw an error toRegex' :: Text -> Regexp toRegex' = either errorWithoutStackTrace id . toRegex -- Like toRegex', but make a case-insensitive Regex. toRegexCI' :: Text -> Regexp toRegexCI' = either errorWithoutStackTrace id . toRegexCI -- | A replacement pattern. May include numeric backreferences (\N). type Replacement = String -- | An error message arising during a regular expression operation. -- Eg: trying to compile a malformed regular expression, or -- trying to apply a malformed replacement pattern. type RegexError = String -- helpers -- | Test whether a Regexp matches a String. This is an alias for `matchTest` for consistent -- naming. regexMatch :: Regexp -> String -> Bool regexMatch = matchTest -- | Tests whether a Regexp matches a Text. -- -- This currently unpacks the Text to a String and works on that. This is due to -- a performance bug in regex-tdfa (#9), which may or may not be relevant here. regexMatchText :: Regexp -> Text -> Bool regexMatchText r = matchTest r . T.unpack -------------------------------------------------------------------------------- -- new total functions -- | A memoising version of regexReplaceUnmemo. Caches the result for each -- search pattern, replacement pattern, target string tuple. -- This won't generate a regular expression parsing error since that -- is pre-compiled nowadays, but there can still be a runtime error -- from the replacement pattern, eg with a backreference referring -- to a nonexistent match group. regexReplace :: Regexp -> Replacement -> String -> Either RegexError String regexReplace re repl = memo $ regexReplaceUnmemo re repl -- helpers: -- Replace this regular expression with this replacement pattern in this -- string, or return an error message. (There should be no regexp -- parsing errors these days since Regexp's compiled form is used, -- but there can still be a runtime error from the replacement -- pattern, eg a backreference referring to a nonexistent match group.) regexReplaceUnmemo :: Regexp -> Replacement -> String -> Either RegexError String regexReplaceUnmemo re repl str = foldM (replaceMatch repl) str (reverse $ match (reCompiled re) str :: [MatchText String]) where -- Replace one match within the string with the replacement text -- appropriate for this match. Or return an error message. 
replaceMatch :: Replacement -> String -> MatchText String -> Either RegexError String replaceMatch replpat s matchgroups = case elems matchgroups of [] -> Right s ((_,(off,len)):_) -> -- groups should have 0-based indexes, and there should always be at least one, since this is a match erpl >>= \rpl -> Right $ pre ++ rpl ++ post where (pre, post') = splitAt off s post = drop len post' -- The replacement text: the replacement pattern with all -- numeric backreferences replaced by the appropriate groups -- from this match. Or an error message. erpl = regexReplaceAllByM backrefRegex (lookupMatchGroup matchgroups) replpat where -- Given some match groups and a numeric backreference, -- return the referenced group text, or an error message. lookupMatchGroup :: MatchText String -> String -> Either RegexError String lookupMatchGroup grps ('\\':s2@(_:_)) | all isDigit s2 = case read s2 of n | n `elem` indices grps -> Right $ fst (grps ! n) -- PARTIAL: should not fail, all digits _ -> Left $ "no match group exists for backreference \"\\"++s++"\"" lookupMatchGroup _ s2 = Left $ "lookupMatchGroup called on non-numeric-backreference \""++s2++"\", shouldn't happen" backrefRegex = toRegex' "\\\\[0-9]+" -- PARTIAL: should not fail -- regexReplace' :: Regexp -> Replacement -> String -> String -- regexReplace' re repl s = -- foldl (replaceMatch repl) s (reverse $ match (reCompiled re) s :: [MatchText String]) -- where -- replaceMatch :: Replacement -> String -> MatchText String -> String -- replaceMatch replpat s matchgroups = pre ++ repl ++ post -- where -- ((_,(off,len)):_) = elems matchgroups -- groups should have 0-based indexes, and there should always be at least one, since this is a match -- (pre, post') = splitAt off s -- post = drop len post' -- repl = regexReplaceAllBy backrefRegex (lookupMatchGroup matchgroups) replpat -- where -- lookupMatchGroup :: MatchText String -> String -> String -- lookupMatchGroup grps ('\\':s@(_:_)) | all isDigit s = -- case read s of n | n `elem` indices grps -> fst (grps ! n) -- -- PARTIAL: -- _ -> error' $ "no match group exists for backreference \"\\"++s++"\"" -- lookupMatchGroup _ s = error' $ "lookupMatchGroup called on non-numeric-backreference \""++s++"\", shouldn't happen" -- backrefRegex = toRegex' "\\\\[0-9]+" -- PARTIAL: should not fail -- helpers -- adapted from http://stackoverflow.com/questions/9071682/replacement-substition-with-haskell-regex-libraries: -- Replace all occurrences of a regexp in a string, transforming each match -- with the given pure function. regexReplaceAllBy :: Regexp -> (String -> String) -> String -> String regexReplaceAllBy re transform s = prependdone rest where (_, rest, prependdone) = foldl' go (0, s, id) matches where matches = getAllMatches $ match (reCompiled re) s :: [(Int, Int)] -- offset and length go :: (Int,String,String->String) -> (Int,Int) -> (Int,String,String->String) go (pos,todo,prepend) (off,len) = let (prematch, matchandrest) = splitAt (off - pos) todo (matched, rest2) = splitAt len matchandrest in (off + len, rest2, prepend . (prematch++) . (transform matched ++)) -- Replace all occurrences of a regexp in a string, transforming each match -- with the given monadic function. Eg if the monad is Either, a Left result -- from the transform function short-circuits and is returned as the overall -- result. regexReplaceAllByM :: forall m. 
Monad m => Regexp -> (String -> m String) -> String -> m String regexReplaceAllByM re transform s = foldM go (0, s, id) matches >>= \(_, rest, prependdone) -> pure $ prependdone rest where matches = getAllMatches $ match (reCompiled re) s :: [(Int, Int)] -- offset and length go :: (Int,String,String->String) -> (Int,Int) -> m (Int,String,String->String) go (pos,todo,prepend) (off,len) = let (prematch, matchandrest) = splitAt (off - pos) todo (matched, rest) = splitAt len matchandrest in transform matched >>= \matched' -> pure (off + len, rest, prepend . (prematch++) . (matched' ++)) hledger-lib-1.30/Hledger/Utils/String.hs0000644000000000000000000001737714434445206016302 0ustar0000000000000000-- | String formatting helpers, starting to get a bit out of control. module Hledger.Utils.String ( takeEnd, -- * misc lowercase, uppercase, underline, stripbrackets, -- quoting quoteIfNeeded, singleQuoteIfNeeded, quoteForCommandLine, -- quotechars, -- whitespacechars, words', unwords', stripAnsi, -- * single-line layout strip, lstrip, rstrip, strip1Char, stripBy, strip1By, chomp, chomp1, singleline, elideLeft, elideRight, formatString, -- * wide-character-aware layout charWidth, strWidth, strWidthAnsi, takeWidth, ) where import Data.Char (isSpace, toLower, toUpper) import Data.List (intercalate, dropWhileEnd) import qualified Data.Text as T import Text.Megaparsec ((<|>), between, many, noneOf, sepBy) import Text.Megaparsec.Char (char) import Text.Printf (printf) import Hledger.Utils.Parse import Hledger.Utils.Regex (toRegex', regexReplace) import Text.DocLayout (charWidth, realLength) -- | Take elements from the end of a list. takeEnd n l = go (drop n l) l where go (_:xs) (_:ys) = go xs ys go [] r = r go _ [] = [] lowercase, uppercase :: String -> String lowercase = map toLower uppercase = map toUpper -- | Remove leading and trailing whitespace. strip :: String -> String strip = lstrip . rstrip -- | Remove leading whitespace. lstrip :: String -> String lstrip = dropWhile isSpace -- | Remove trailing whitespace. rstrip :: String -> String rstrip = reverse . lstrip . reverse -- | Strip the given starting and ending character -- from the start and end of a string if both are present. strip1Char :: Char -> Char -> String -> String strip1Char b e s = case s of (c:cs) | c==b, not $ null cs, last cs==e -> init cs _ -> s -- | Strip a run of zero or more characters matching the predicate -- from the start and end of a string. stripBy :: (Char -> Bool) -> String -> String stripBy f = dropWhileEnd f . dropWhile f -- | Strip a single balanced enclosing pair of a character matching the predicate -- from the start and end of a string. strip1By :: (Char -> Bool) -> String -> String strip1By f s = case s of (c:cs) | f c, not $ null cs, last cs==c -> init cs _ -> s -- | Remove all trailing newlines/carriage returns. chomp :: String -> String chomp = reverse . dropWhile (`elem` "\r\n") . reverse -- | Remove all trailing newline/carriage returns, leaving just one trailing newline. chomp1 :: String -> String chomp1 = (++"\n") . chomp -- | Remove consecutive line breaks, replacing them with single space singleline :: String -> String singleline = unwords . filter (/="") . (map strip) . lines stripbrackets :: String -> String stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String elideLeft :: Int -> String -> String elideLeft width s = if length s > width then ".." 
++ takeEnd (width - 2) s else s elideRight :: Int -> String -> String elideRight width s = if length s > width then take (width - 2) s ++ ".." else s -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- Works on multi-line strings too (but will rewrite non-unix line endings). formatString :: Bool -> Maybe Int -> Maybe Int -> String -> String formatString leftJustified minwidth maxwidth s = intercalate "\n" $ map (printf fmt) $ lines s where justify = if leftJustified then "-" else "" minwidth' = maybe "" show minwidth maxwidth' = maybe "" (("."++).show) maxwidth fmt = "%" ++ justify ++ minwidth' ++ maxwidth' ++ "s" underline :: String -> String underline s = s' ++ replicate (length s) '-' ++ "\n" where s' | last s == '\n' = s | otherwise = s ++ "\n" -- | Double-quote this string if it contains whitespace, single quotes -- or double-quotes, escaping the quotes as needed. quoteIfNeeded :: String -> String quoteIfNeeded s | any (`elem` s) (quotechars++whitespacechars++redirectchars) = showChar '"' $ escapeQuotes s "\"" | otherwise = s where escapeQuotes [] x = x escapeQuotes ('"':cs) x = showString "\\\"" $ escapeQuotes cs x escapeQuotes (c:cs) x = showChar c $ escapeQuotes cs x -- | Single-quote this string if it contains whitespace or double-quotes. -- Does not work for strings containing single quotes. singleQuoteIfNeeded :: String -> String singleQuoteIfNeeded s | any (`elem` s) (quotechars++whitespacechars) = singleQuote s | otherwise = s -- | Prepend and append single quotes to a string. singleQuote :: String -> String singleQuote s = "'"++s++"'" -- | Try to single- and backslash-quote a string as needed to make it usable -- as an argument on a (sh/bash) shell command line. At least, well enough -- to handle common currency symbols, like $. Probably broken in many ways. -- -- >>> quoteForCommandLine "a" -- "a" -- >>> quoteForCommandLine "\"" -- "'\"'" -- >>> quoteForCommandLine "$" -- "'\\$'" -- quoteForCommandLine :: String -> String quoteForCommandLine s | any (`elem` s) (quotechars++whitespacechars++shellchars) = singleQuote $ quoteShellChars s | otherwise = s -- | Try to backslash-quote common shell-significant characters in this string. -- Doesn't handle single quotes, & probably others. quoteShellChars :: String -> String quoteShellChars = concatMap escapeShellChar where escapeShellChar c | c `elem` shellchars = ['\\',c] escapeShellChar c = [c] quotechars, whitespacechars, redirectchars, shellchars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" redirectchars = "<>" shellchars = "<>(){}[]$7?#!~`" -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. words' :: String -> [String] words' "" = [] words' s = map stripquotes $ fromparse $ parsewithString p s where p = (singleQuotedPattern <|> doubleQuotedPattern <|> patterns) `sepBy` skipNonNewlineSpaces1 -- eof patterns = many (noneOf whitespacechars) singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- | Quote-aware version of unwords - single-quote strings which contain whitespace unwords' :: [String] -> String unwords' = unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. 
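-- A hypothetical usage sketch of the quoting helpers above (expected
-- values, not verified by running):
--   words' "a 'b c' d"      -- ["a","b c","d"]
--   unwords' ["a","b c"]    -- "a \"b c\""
--   quoteIfNeeded "b c"     -- "\"b c\""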
stripquotes :: String -> String stripquotes s = if isSingleQuoted s || isDoubleQuoted s then init $ tail s else s isSingleQuoted s@(_:_:_) = head s == '\'' && last s == '\'' isSingleQuoted _ = False isDoubleQuoted s@(_:_:_) = head s == '"' && last s == '"' isDoubleQuoted _ = False -- Functions below treat wide (eg CJK) characters as double-width. -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg takeWidth 3 "りんご" = "り". takeWidth :: Int -> String -> String takeWidth _ "" = "" takeWidth 0 _ = "" takeWidth w (c:cs) | cw <= w = c:takeWidth (w-cw) cs | otherwise = "" where cw = charWidth c -- | Like strWidth, but also strips ANSI escape sequences before -- calculating the width. -- -- This is no longer used in code, as widths are calculated before -- adding ANSI escape sequences, but is being kept around for now. strWidthAnsi :: String -> Int strWidthAnsi = strWidth . stripAnsi -- | Alias for 'realLength'. strWidth :: String -> Int strWidth = realLength -- | Strip ANSI escape sequences from a string. -- -- >>> stripAnsi "\ESC[31m-1\ESC[m" -- "-1" stripAnsi :: String -> String stripAnsi s = either err id $ regexReplace ansire "" s where err = error "stripAnsi: invalid replacement pattern" -- PARTIAL, shouldn't happen ansire = toRegex' $ T.pack "\ESC\\[([0-9]+;)*([0-9]+)?[ABCDHJKfmsu]" -- PARTIAL, should succeed hledger-lib-1.30/Hledger/Utils/Test.hs0000644000000000000000000001505414434445206015741 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} module Hledger.Utils.Test ( module Test.Tasty ,module Test.Tasty.HUnit -- ,module QC -- ,module SC ,assertLeft ,assertRight ,assertParse ,assertParseEq ,assertParseEqOn ,assertParseError ,assertParseE ,assertParseEqE ,assertParseErrorE ,assertParseStateOn ) where import Control.Monad (unless) import Control.Monad.Except (ExceptT(..), liftEither, runExceptT, withExceptT) import Control.Monad.State.Strict (StateT, evalStateT, execStateT) import Data.Default (Default(..)) import Data.List (isInfixOf) import qualified Data.Text as T import Test.Tasty hiding (defaultMain) import Test.Tasty.HUnit -- import Test.Tasty.QuickCheck as QC -- import Test.Tasty.SmallCheck as SC import Text.Megaparsec import Text.Megaparsec.Custom ( HledgerParseErrorData, FinalParseError, attachSource, customErrorBundlePretty, finalErrorBundlePretty, ) import Hledger.Utils.IO (pshow) -- * tasty helpers -- TODO: pretty-print values in failure messages -- | Assert any Left value. assertLeft :: (HasCallStack, Eq b, Show b) => Either a b -> Assertion assertLeft (Left _) = return () assertLeft (Right b) = assertFailure $ "expected Left, got (Right " ++ show b ++ ")" -- | Assert any Right value. assertRight :: (HasCallStack, Eq a, Show a) => Either a b -> Assertion assertRight (Right _) = return () assertRight (Left a) = assertFailure $ "expected Right, got (Left " ++ show a ++ ")" -- | Run a parser on the given text and display a helpful error. parseHelper :: (HasCallStack, Default st, Monad m) => StateT st (ParsecT HledgerParseErrorData T.Text m) a -> T.Text -> ExceptT String m a parseHelper parser input = withExceptT (\e -> "\nparse error at " ++ customErrorBundlePretty e ++ "\n") . ExceptT $ runParserT (evalStateT (parser <* eof) def) "" input -- | Run a stateful parser in IO and process either a failure or success to -- produce an 'Assertion'. Suitable for hledger's JournalParser parsers. 
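-- A hypothetical usage sketch (parser and test names invented): with some
-- journal-style parser datep and expected value d, tests written with
-- these helpers might look like
--   testCase "good date" $ assertParseEq datep "2023-01-01" d
--   testCase "bad date"  $ assertParseError datep "2023-13-32" "invalid"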
assertParseHelper :: (HasCallStack, Default st) => (String -> Assertion) -> (a -> Assertion) -> StateT st (ParsecT HledgerParseErrorData T.Text IO) a -> T.Text -> Assertion assertParseHelper onFailure onSuccess parser input = either onFailure onSuccess =<< runExceptT (parseHelper parser input) -- | Assert that this stateful parser runnable in IO successfully parses -- all of the given input text, showing the parse error if it fails. -- Suitable for hledger's JournalParser parsers. assertParse :: (HasCallStack, Default st) => StateT st (ParsecT HledgerParseErrorData T.Text IO) a -> T.Text -> Assertion assertParse = assertParseHelper assertFailure (const $ return ()) -- | Assert a parser produces an expected value. assertParseEq :: (HasCallStack, Eq a, Show a, Default st) => StateT st (ParsecT HledgerParseErrorData T.Text IO) a -> T.Text -> a -> Assertion assertParseEq parser input = assertParseEqOn parser input id -- | Like assertParseEq, but transform the parse result with the given function -- before comparing it. assertParseEqOn :: (HasCallStack, Eq b, Show b, Default st) => StateT st (ParsecT HledgerParseErrorData T.Text IO) a -> T.Text -> (a -> b) -> b -> Assertion assertParseEqOn parser input f expected = assertParseHelper assertFailure (assertEqual "" expected . f) parser input -- | Assert that this stateful parser runnable in IO fails to parse -- the given input text, with a parse error containing the given string. assertParseError :: (HasCallStack, Eq a, Show a, Default st) => StateT st (ParsecT HledgerParseErrorData T.Text IO) a -> T.Text -> String -> Assertion assertParseError parser input errstr = assertParseHelper (\e -> unless (errstr `isInfixOf` e) $ assertFailure $ "\nparse error is not as expected:" ++ e) (\v -> assertFailure $ "\nparse succeeded unexpectedly, producing:\n" ++ pshow v ++ "\n") parser input -- | Run a stateful parser in IO like assertParse, then assert that the -- final state (the wrapped state, not megaparsec's internal state), -- transformed by the given function, matches the given expected value. assertParseStateOn :: (HasCallStack, Eq b, Show b, Default st) => StateT st (ParsecT HledgerParseErrorData T.Text IO) a -> T.Text -> (st -> b) -> b -> Assertion assertParseStateOn parser input f expected = do es <- runParserT (execStateT (parser <* eof) def) "" input case es of Left err -> assertFailure $ (++"\n") $ ("\nparse error at "++) $ customErrorBundlePretty err Right s -> assertEqual "" expected $ f s -- | These "E" variants of the above are suitable for hledger's ErroringJournalParser parsers. parseHelperE :: (HasCallStack, Default st, Monad m) => StateT st (ParsecT HledgerParseErrorData T.Text (ExceptT FinalParseError m)) a -> T.Text -> ExceptT String m a parseHelperE parser input = do withExceptT (\e -> "\nparse error at " ++ customErrorBundlePretty e ++ "\n") . 
liftEither =<< withExceptT (\e -> "parse error at " ++ finalErrorBundlePretty (attachSource "" input e)) (runParserT (evalStateT (parser <* eof) def) "" input) assertParseHelperE :: (HasCallStack, Default st) => (String -> Assertion) -> (a -> Assertion) -> StateT st (ParsecT HledgerParseErrorData T.Text (ExceptT FinalParseError IO)) a -> T.Text -> Assertion assertParseHelperE onFailure onSuccess parser input = either onFailure onSuccess =<< runExceptT (parseHelperE parser input) assertParseE :: (HasCallStack, Eq a, Show a, Default st) => StateT st (ParsecT HledgerParseErrorData T.Text (ExceptT FinalParseError IO)) a -> T.Text -> Assertion assertParseE = assertParseHelperE assertFailure (const $ return ()) assertParseEqE :: (Default st, Eq a, Show a, HasCallStack) => StateT st (ParsecT HledgerParseErrorData T.Text (ExceptT FinalParseError IO)) a -> T.Text -> a -> Assertion assertParseEqE parser input = assertParseEqOnE parser input id assertParseEqOnE :: (HasCallStack, Eq b, Show b, Default st) => StateT st (ParsecT HledgerParseErrorData T.Text (ExceptT FinalParseError IO)) a -> T.Text -> (a -> b) -> b -> Assertion assertParseEqOnE parser input f expected = assertParseHelperE assertFailure (assertEqual "" expected . f) parser input assertParseErrorE :: (Default st, Eq a, Show a, HasCallStack) => StateT st (ParsecT HledgerParseErrorData T.Text (ExceptT FinalParseError IO)) a -> T.Text -> String -> Assertion assertParseErrorE parser input errstr = assertParseHelperE (\e -> unless (errstr `isInfixOf` e) $ assertFailure $ "\nparse error is not as expected:" ++ e) (\v -> assertFailure $ "\nparse succeeded unexpectedly, producing:\n" ++ pshow v ++ "\n") parser input hledger-lib-1.30/Hledger/Utils/Text.hs0000644000000000000000000002455014434445206015747 0ustar0000000000000000-- | Text formatting helpers, ported from String as needed. -- There may be better alternatives out there. {-# LANGUAGE OverloadedStrings #-} module Hledger.Utils.Text ( -- * misc -- lowercase, -- uppercase, -- underline, -- stripbrackets, textUnbracket, wrap, textChomp, -- quoting quoteIfSpaced, textQuoteIfNeeded, -- singleQuoteIfNeeded, -- quotechars, -- whitespacechars, escapeDoubleQuotes, -- escapeSingleQuotes, -- escapeQuotes, -- words', -- unwords', stripquotes, -- isSingleQuoted, -- isDoubleQuoted, -- * single-line layout -- elideLeft, textElideRight, formatText, -- * multi-line layout textConcatTopPadded, textConcatBottomPadded, fitText, linesPrepend, linesPrepend2, unlinesB, -- * wide-character-aware layout WideBuilder(..), wbToText, wbFromText, wbUnpack, textTakeWidth, -- * Reading readDecimal, -- * tests tests_Text ) where import Data.Char (digitToInt) import Data.Default (def) import Data.Maybe (catMaybes) import Data.Text (Text) import qualified Data.Text as T import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Text.DocLayout (charWidth, realLength) import Test.Tasty (testGroup) import Test.Tasty.HUnit ((@?=), testCase) import Text.Tabular.AsciiWide (Align(..), Header(..), Properties(..), TableOpts(..), renderRow, textCell) import Text.WideString (WideBuilder(..), wbToText, wbFromText, wbUnpack) -- lowercase, uppercase :: String -> String -- lowercase = map toLower -- uppercase = map toUpper -- stripbrackets :: String -> String -- stripbrackets = dropWhile (`elem` "([") . reverse . dropWhile (`elem` "])") . reverse :: String -> String -- elideLeft :: Int -> String -> String -- elideLeft width s = -- if length s > width then ".." 
++ reverse (take (width - 2) $ reverse s) else s textElideRight :: Int -> Text -> Text textElideRight width t = if T.length t > width then T.take (width - 2) t <> ".." else t -- | Wrap a Text with the surrounding Text. wrap :: Text -> Text -> Text -> Text wrap start end x = start <> x <> end -- | Remove trailing newlines/carriage returns. textChomp :: Text -> Text textChomp = T.dropWhileEnd (`elem` ['\r', '\n']) -- | Clip and pad a string to a minimum & maximum width, and/or left/right justify it. -- Works on multi-line strings too (but will rewrite non-unix line endings). formatText :: Bool -> Maybe Int -> Maybe Int -> Text -> Text formatText leftJustified minwidth maxwidth t = T.intercalate "\n" . map (pad . clip) $ if T.null t then [""] else T.lines t where pad = maybe id justify minwidth clip = maybe id T.take maxwidth justify n = if leftJustified then T.justifyLeft n ' ' else T.justifyRight n ' ' -- underline :: String -> String -- underline s = s' ++ replicate (length s) '-' ++ "\n" -- where s' -- | last s == '\n' = s -- | otherwise = s ++ "\n" -- | Wrap a string in double quotes, and \-prefix any embedded single -- quotes, if it contains whitespace and is not already single- or -- double-quoted. quoteIfSpaced :: T.Text -> T.Text quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s | not $ any (\c -> T.any (==c) s) whitespacechars = s | otherwise = textQuoteIfNeeded s -- -- | Wrap a string in double quotes, and \-prefix any embedded single -- -- quotes, if it contains whitespace and is not already single- or -- -- double-quoted. -- quoteIfSpaced :: String -> String -- quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s -- | not $ any (`elem` s) whitespacechars = s -- | otherwise = "'"++escapeSingleQuotes s++"'" -- -- | Double-quote this string if it contains whitespace, single quotes -- -- or double-quotes, escaping the quotes as needed. textQuoteIfNeeded :: T.Text -> T.Text textQuoteIfNeeded s | any (\c -> T.any (==c) s) (quotechars++whitespacechars) = "\"" <> escapeDoubleQuotes s <> "\"" | otherwise = s -- -- | Single-quote this string if it contains whitespace or double-quotes. -- -- No good for strings containing single quotes. -- singleQuoteIfNeeded :: String -> String -- singleQuoteIfNeeded s | any (`elem` s) whitespacechars = "'"++s++"'" -- | otherwise = s quotechars, whitespacechars :: [Char] quotechars = "'\"" whitespacechars = " \t\n\r" escapeDoubleQuotes :: T.Text -> T.Text escapeDoubleQuotes = T.replace "\"" "\\\"" -- escapeSingleQuotes :: T.Text -> T.Text -- escapeSingleQuotes = T.replace "'" "\'" -- escapeQuotes :: String -> String -- escapeQuotes = regexReplace "([\"'])" "\\1" -- -- | Quote-aware version of words - don't split on spaces which are inside quotes. -- -- NB correctly handles "a'b" but not "''a''". Can raise an error if parsing fails. -- words' :: String -> [String] -- words' "" = [] -- words' s = map stripquotes $ fromparse $ parsewith p s -- where -- p = do ss <- (singleQuotedPattern <|> doubleQuotedPattern <|> pattern) `sepBy` many1 spacenonewline -- -- eof -- return ss -- pattern = many (noneOf whitespacechars) -- singleQuotedPattern = between (char '\'') (char '\'') (many $ noneOf "'") -- doubleQuotedPattern = between (char '"') (char '"') (many $ noneOf "\"") -- -- | Quote-aware version of unwords - single-quote strings which contain whitespace -- unwords' :: [Text] -> Text -- unwords' = T.unwords . map quoteIfNeeded -- | Strip one matching pair of single or double quotes on the ends of a string. 
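-- A hypothetical usage sketch of the layout and quoting helpers above
-- (expected values, not verified by running):
--   textElideRight 5 "hello world"          -- "hel.."
--   formatText True (Just 8) Nothing "hi"   -- "hi      "
--   textQuoteIfNeeded "a b"                 -- "\"a b\""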
stripquotes :: Text -> Text stripquotes s = if isSingleQuoted s || isDoubleQuoted s then T.init $ T.tail s else s isSingleQuoted :: Text -> Bool isSingleQuoted s = T.length s >= 2 && T.head s == '\'' && T.last s == '\'' isDoubleQuoted :: Text -> Bool isDoubleQuoted s = T.length s >= 2 && T.head s == '"' && T.last s == '"' -- | Remove all matching pairs of square brackets and parentheses from the text. textUnbracket :: Text -> Text textUnbracket s = T.drop stripN $ T.dropEnd stripN s where matchBracket :: Char -> Maybe Char matchBracket '(' = Just ')' matchBracket '[' = Just ']' matchBracket _ = Nothing expectedClosingBrackets = catMaybes $ takeWhile (/= Nothing) $ matchBracket <$> T.unpack s stripN = length $ takeWhile (uncurry (==)) $ zip expectedClosingBrackets $ reverse $ T.unpack s -- | Join several multi-line strings as side-by-side rectangular strings of the same height, top-padded. -- Treats wide characters as double width. textConcatTopPadded :: [Text] -> Text textConcatTopPadded = TL.toStrict . renderRow def{tableBorders=False, borderSpaces=False} . Group NoLine . map (Header . textCell BottomLeft) -- | Join several multi-line strings as side-by-side rectangular strings of the same height, bottom-padded. -- Treats wide characters as double width. textConcatBottomPadded :: [Text] -> Text textConcatBottomPadded = TL.toStrict . renderRow def{tableBorders=False, borderSpaces=False} . Group NoLine . map (Header . textCell TopLeft) -- -- Functions below treat wide (eg CJK) characters as double-width. -- | General-purpose wide-char-aware single-line text layout function. -- It can left- or right-pad a short string to a minimum width. -- It can left- or right-clip a long string to a maximum width, optionally inserting an ellipsis (the third argument). -- It clips and pads on the right when the fourth argument is true, otherwise on the left. -- It treats wide characters as double width. fitText :: Maybe Int -> Maybe Int -> Bool -> Bool -> Text -> Text fitText mminwidth mmaxwidth ellipsify rightside = clip . pad where clip :: Text -> Text clip s = case mmaxwidth of Just w | realLength s > w -> if rightside then textTakeWidth (w - T.length ellipsis) s <> ellipsis else ellipsis <> T.reverse (textTakeWidth (w - T.length ellipsis) $ T.reverse s) | otherwise -> s where ellipsis = if ellipsify then ".." else "" Nothing -> s pad :: Text -> Text pad s = case mminwidth of Just w | sw < w -> if rightside then s <> T.replicate (w - sw) " " else T.replicate (w - sw) " " <> s | otherwise -> s Nothing -> s where sw = realLength s -- | Double-width-character-aware string truncation. Take as many -- characters as possible from a string without exceeding the -- specified width. Eg textTakeWidth 3 "りんご" = "り". textTakeWidth :: Int -> Text -> Text textTakeWidth _ "" = "" textTakeWidth 0 _ = "" textTakeWidth w t | not (T.null t), let c = T.head t, let cw = charWidth c, cw <= w = T.cons c $ textTakeWidth (w-cw) (T.tail t) | otherwise = "" -- | Add a prefix to each line of a string. linesPrepend :: Text -> Text -> Text linesPrepend prefix = T.unlines . map (prefix<>) . T.lines -- | Add a prefix to the first line of a string, -- and a different prefix to the remaining lines. linesPrepend2 :: Text -> Text -> Text -> Text linesPrepend2 prefix1 prefix2 s = T.unlines $ case T.lines s of [] -> [] l:ls -> (prefix1<>l) : map (prefix2<>) ls -- | Join a list of Text Builders with a newline after each item. 
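-- A hypothetical usage sketch of the width-aware helpers above (expected
-- values, not verified by running; wide characters count as two columns):
--   fitText (Just 6) (Just 6) True True "hello world"   -- "hell.."
--   fitText (Just 6) Nothing  False True "hi"           -- "hi    "
--   linesPrepend "; " "a\nb\n"                          -- "; a\n; b\n"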
unlinesB :: [TB.Builder] -> TB.Builder unlinesB = foldMap (<> TB.singleton '\n') -- | Read a decimal number from a Text. Assumes the input consists only of digit -- characters. readDecimal :: Text -> Integer readDecimal = T.foldl' step 0 where step a c = a * 10 + toInteger (digitToInt c) tests_Text = testGroup "Text" [ testCase "quoteIfSpaced" $ do quoteIfSpaced "a'a" @?= "a'a" quoteIfSpaced "a\"a" @?= "a\"a" quoteIfSpaced "a a" @?= "\"a a\"" quoteIfSpaced "mimi's cafe" @?= "\"mimi's cafe\"" quoteIfSpaced "\"alex\" cafe" @?= "\"\\\"alex\\\" cafe\"" quoteIfSpaced "le'shan's cafe" @?= "\"le'shan's cafe\"" quoteIfSpaced "\"be'any's\" cafe" @?= "\"\\\"be'any's\\\" cafe\"", testCase "textUnbracket" $ do textUnbracket "()" @?= "" textUnbracket "(a)" @?= "a" textUnbracket "(ab)" @?= "ab" textUnbracket "[ab]" @?= "ab" textUnbracket "([ab])" @?= "ab" textUnbracket "(()b)" @?= "()b" textUnbracket "[[]b]" @?= "[]b" textUnbracket "[()b]" @?= "()b" textUnbracket "[([]())]" @?= "[]()" textUnbracket "[([[[()]]])]" @?= "" textUnbracket "[([[[(]]])]" @?= "(" textUnbracket "[([[[)]]])]" @?= ")" ] hledger-lib-1.30/Text/Tabular/AsciiWide.hs0000644000000000000000000003174114434445206016530 0ustar0000000000000000-- | Text.Tabular.AsciiArt from tabular-0.2.2.7, modified to treat -- wide characters as double width. {-# LANGUAGE OverloadedStrings #-} module Text.Tabular.AsciiWide ( module Text.Tabular , TableOpts(..) , render , renderTable , renderTableB , renderTableByRowsB , renderRow , renderRowB , renderColumns , Cell(..) , Align(..) , emptyCell , textCell , textsCell , cellWidth , concatTables ) where import Data.Bifunctor (bimap) import Data.Maybe (fromMaybe) import Data.Default (Default(..)) import Data.List (intercalate, intersperse, transpose) import Data.Semigroup (stimesMonoid) import Data.Text (Text) import qualified Data.Text as T import qualified Data.Text.Lazy as TL import Data.Text.Lazy.Builder (Builder, fromString, fromText, singleton, toLazyText) import Safe (maximumMay) import Text.Tabular import Text.WideString (WideBuilder(..), wbFromText) -- | The options to use for rendering a table. data TableOpts = TableOpts { prettyTable :: Bool -- ^ Pretty tables , tableBorders :: Bool -- ^ Whether to display the outer borders , borderSpaces :: Bool -- ^ Whether to display spaces around bars } deriving (Show) instance Default TableOpts where def = TableOpts { prettyTable = False , tableBorders = True , borderSpaces = True } -- | Cell contents along an alignment data Cell = Cell Align [WideBuilder] -- | How to align text in a cell data Align = TopRight | BottomRight | BottomLeft | TopLeft deriving (Show) emptyCell :: Cell emptyCell = Cell TopRight [] -- | Create a single-line cell from the given contents with its natural width. textCell :: Align -> Text -> Cell textCell a x = Cell a . map wbFromText $ if T.null x then [""] else T.lines x -- | Create a multi-line cell from the given contents with its natural width. textsCell :: Align -> [Text] -> Cell textsCell a = Cell a . fmap wbFromText -- | Return the width of a Cell. cellWidth :: Cell -> Int cellWidth (Cell _ xs) = fromMaybe 0 . maximumMay $ map wbWidth xs -- | Render a table according to common options, for backwards compatibility render :: Bool -> (rh -> Text) -> (ch -> Text) -> (a -> Text) -> Table rh ch a -> TL.Text render pretty fr fc f = renderTable def{prettyTable=pretty} (cell . fr) (cell . fc) (cell . 
f) where cell = textCell TopRight -- | Render a table according to various cell specifications. renderTable :: TableOpts -- ^ Options controlling Table rendering -> (rh -> Cell) -- ^ Rendering function for row headers -> (ch -> Cell) -- ^ Rendering function for column headers -> (a -> Cell) -- ^ Function determining the string and width of a cell -> Table rh ch a -> TL.Text renderTable topts fr fc f = toLazyText . renderTableB topts fr fc f -- | A version of renderTable which returns the underlying Builder. renderTableB :: TableOpts -- ^ Options controlling Table rendering -> (rh -> Cell) -- ^ Rendering function for row headers -> (ch -> Cell) -- ^ Rendering function for column headers -> (a -> Cell) -- ^ Function determining the string and width of a cell -> Table rh ch a -> Builder renderTableB topts fr fc f = renderTableByRowsB topts (fmap fc) $ bimap fr (fmap f) -- | A version of renderTable that operates on rows (including the 'row' of -- column headers) and returns the underlying Builder. renderTableByRowsB :: TableOpts -- ^ Options controlling Table rendering -> ([ch] -> [Cell]) -- ^ Rendering function for column headers -> ((rh, [a]) -> (Cell, [Cell])) -- ^ Rendering function for row and row header -> Table rh ch a -> Builder renderTableByRowsB topts@TableOpts{prettyTable=pretty, tableBorders=borders} fc f (Table rh ch cells) = unlinesB . addBorders $ renderColumns topts sizes ch2 : bar VM DoubleLine -- +======================================+ : renderRs (renderR <$> zipHeader [] cellContents rowHeaders) where renderR :: ([Cell], Cell) -> Builder renderR (cs,h) = renderColumns topts sizes $ Group DoubleLine [ Header h , fst <$> zipHeader emptyCell cs colHeaders ] rows = unzip . fmap f $ zip (headerContents rh) cells rowHeaders = fst <$> zipHeader emptyCell (fst rows) rh colHeaders = fst <$> zipHeader emptyCell (fc $ headerContents ch) ch cellContents = snd rows -- ch2 and cells2 include the row and column labels ch2 = Group DoubleLine [Header emptyCell, colHeaders] cells2 = headerContents ch2 : zipWith (:) (headerContents rowHeaders) cellContents -- maximum width for each column sizes = map (fromMaybe 0 . maximumMay . map cellWidth) $ transpose cells2 renderRs (Header s) = [s] renderRs (Group p hs) = intercalate sep $ map renderRs hs where sep = renderHLine VM borders pretty sizes ch2 p -- borders and bars addBorders xs = if borders then bar VT SingleLine : xs ++ [bar VB SingleLine] else xs bar vpos prop = mconcat $ renderHLine vpos borders pretty sizes ch2 prop unlinesB = foldMap (<> singleton '\n') -- | Render a single row according to cell specifications. renderRow :: TableOpts -> Header Cell -> TL.Text renderRow topts = toLazyText . renderRowB topts -- | A version of renderRow which returns the underlying Builder. 
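-- An illustrative sketch (not part of the original module): rendering a single
-- borderless row of left- and right-aligned cells, in the same style as
-- textConcatTopPadded in Hledger.Utils.Text. 'exampleRowText' is a hypothetical
-- name used only for this example.
exampleRowText :: TL.Text
exampleRowText =
  renderRow def{tableBorders=False} $
    Group SingleLine [ Header (textCell TopLeft "assets:bank")
                     , Header (textCell TopRight "1,000.00 USD")
                     ]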
renderRowB:: TableOpts -> Header Cell -> Builder renderRowB topts h = renderColumns topts is h where is = map cellWidth $ headerContents h verticalBar :: Bool -> Char verticalBar pretty = if pretty then '│' else '|' leftBar :: Bool -> Bool -> Builder leftBar pretty True = fromString $ verticalBar pretty : " " leftBar pretty False = singleton $ verticalBar pretty rightBar :: Bool -> Bool -> Builder rightBar pretty True = fromString $ ' ' : [verticalBar pretty] rightBar pretty False = singleton $ verticalBar pretty midBar :: Bool -> Bool -> Builder midBar pretty True = fromString $ ' ' : verticalBar pretty : " " midBar pretty False = singleton $ verticalBar pretty doubleMidBar :: Bool -> Bool -> Builder doubleMidBar pretty True = fromText $ if pretty then " ║ " else " || " doubleMidBar pretty False = fromText $ if pretty then "║" else "||" -- | We stop rendering on the shortest list! renderColumns :: TableOpts -- ^ rendering options for the table -> [Int] -- ^ max width for each column -> Header Cell -> Builder renderColumns TableOpts{prettyTable=pretty, tableBorders=borders, borderSpaces=spaces} is h = mconcat . intersperse "\n" -- Put each line on its own line . map (addBorders . mconcat) . transpose -- Change to a list of lines and add borders . map (either hsep padCell) . flattenHeader -- We now have a matrix of strings . zipHeader 0 is $ padRow <$> h -- Pad cell height and add width marker where -- Pad each cell to have the appropriate width padCell (w, Cell TopLeft ls) = map (\x -> wbBuilder x <> fromText (T.replicate (w - wbWidth x) " ")) ls padCell (w, Cell BottomLeft ls) = map (\x -> wbBuilder x <> fromText (T.replicate (w - wbWidth x) " ")) ls padCell (w, Cell TopRight ls) = map (\x -> fromText (T.replicate (w - wbWidth x) " ") <> wbBuilder x) ls padCell (w, Cell BottomRight ls) = map (\x -> fromText (T.replicate (w - wbWidth x) " ") <> wbBuilder x) ls -- Pad each cell to have the same number of lines padRow (Cell TopLeft ls) = Cell TopLeft $ ls ++ replicate (nLines - length ls) mempty padRow (Cell TopRight ls) = Cell TopRight $ ls ++ replicate (nLines - length ls) mempty padRow (Cell BottomLeft ls) = Cell BottomLeft $ replicate (nLines - length ls) mempty ++ ls padRow (Cell BottomRight ls) = Cell BottomRight $ replicate (nLines - length ls) mempty ++ ls hsep :: Properties -> [Builder] hsep NoLine = replicate nLines $ if spaces then " " else "" hsep SingleLine = replicate nLines $ midBar pretty spaces hsep DoubleLine = replicate nLines $ doubleMidBar pretty spaces addBorders xs | borders = leftBar pretty spaces <> xs <> rightBar pretty spaces | spaces = fromText " " <> xs <> fromText " " | otherwise = xs nLines = fromMaybe 0 . maximumMay . 
map (\(Cell _ ls) -> length ls) $ headerContents h renderHLine :: VPos -> Bool -- ^ show outer borders -> Bool -- ^ pretty -> [Int] -- ^ width specifications -> Header a -> Properties -> [Builder] renderHLine _ _ _ _ _ NoLine = [] renderHLine vpos borders pretty w h prop = [renderHLine' vpos borders pretty prop w h] renderHLine' :: VPos -> Bool -> Bool -> Properties -> [Int] -> Header a -> Builder renderHLine' vpos borders pretty prop is hdr = addBorders $ sep <> coreLine <> sep where addBorders xs = if borders then edge HL <> xs <> edge HR else xs edge hpos = boxchar vpos hpos SingleLine prop pretty coreLine = foldMap helper $ flattenHeader $ zipHeader 0 is hdr helper = either vsep dashes dashes (i,_) = stimesMonoid i sep sep = boxchar vpos HM NoLine prop pretty vsep v = case v of NoLine -> sep <> sep _ -> sep <> cross v prop <> sep cross v h = boxchar vpos HM v h pretty data VPos = VT | VM | VB -- top middle bottom data HPos = HL | HM | HR -- left middle right boxchar :: VPos -> HPos -> Properties -> Properties -> Bool -> Builder boxchar vpos hpos vert horiz = lineart u d l r where u = case vpos of VT -> NoLine _ -> vert d = case vpos of VB -> NoLine _ -> vert l = case hpos of HL -> NoLine _ -> horiz r = case hpos of HR -> NoLine _ -> horiz pick :: Text -> Text -> Bool -> Builder pick x _ True = fromText x pick _ x False = fromText x lineart :: Properties -> Properties -> Properties -> Properties -> Bool -> Builder -- up down left right lineart SingleLine SingleLine SingleLine SingleLine = pick "┼" "+" lineart SingleLine SingleLine SingleLine NoLine = pick "┤" "+" lineart SingleLine SingleLine NoLine SingleLine = pick "├" "+" lineart SingleLine NoLine SingleLine SingleLine = pick "┴" "+" lineart NoLine SingleLine SingleLine SingleLine = pick "┬" "+" lineart SingleLine NoLine NoLine SingleLine = pick "└" "+" lineart SingleLine NoLine SingleLine NoLine = pick "┘" "+" lineart NoLine SingleLine SingleLine NoLine = pick "┐" "+" lineart NoLine SingleLine NoLine SingleLine = pick "┌" "+" lineart SingleLine SingleLine NoLine NoLine = pick "│" "|" lineart NoLine NoLine SingleLine SingleLine = pick "─" "-" lineart DoubleLine DoubleLine DoubleLine DoubleLine = pick "╬" "++" lineart DoubleLine DoubleLine DoubleLine NoLine = pick "╣" "++" lineart DoubleLine DoubleLine NoLine DoubleLine = pick "╠" "++" lineart DoubleLine NoLine DoubleLine DoubleLine = pick "╩" "++" lineart NoLine DoubleLine DoubleLine DoubleLine = pick "╦" "++" lineart DoubleLine NoLine NoLine DoubleLine = pick "╚" "++" lineart DoubleLine NoLine DoubleLine NoLine = pick "╝" "++" lineart NoLine DoubleLine DoubleLine NoLine = pick "╗" "++" lineart NoLine DoubleLine NoLine DoubleLine = pick "╔" "++" lineart DoubleLine DoubleLine NoLine NoLine = pick "║" "||" lineart NoLine NoLine DoubleLine DoubleLine = pick "═" "=" lineart DoubleLine NoLine NoLine SingleLine = pick "╙" "++" lineart DoubleLine NoLine SingleLine NoLine = pick "╜" "++" lineart NoLine DoubleLine SingleLine NoLine = pick "╖" "++" lineart NoLine DoubleLine NoLine SingleLine = pick "╓" "++" lineart SingleLine NoLine NoLine DoubleLine = pick "╘" "+" lineart SingleLine NoLine DoubleLine NoLine = pick "╛" "+" lineart NoLine SingleLine DoubleLine NoLine = pick "╕" "+" lineart NoLine SingleLine NoLine DoubleLine = pick "╒" "+" lineart DoubleLine DoubleLine SingleLine NoLine = pick "╢" "++" lineart DoubleLine DoubleLine NoLine SingleLine = pick "╟" "++" lineart DoubleLine NoLine SingleLine SingleLine = pick "╨" "++" lineart NoLine DoubleLine SingleLine SingleLine = pick "╥" "++" 
lineart SingleLine SingleLine DoubleLine NoLine = pick "╡" "+" lineart SingleLine SingleLine NoLine DoubleLine = pick "╞" "+" lineart SingleLine NoLine DoubleLine DoubleLine = pick "╧" "+" lineart NoLine SingleLine DoubleLine DoubleLine = pick "╤" "+" lineart SingleLine SingleLine DoubleLine DoubleLine = pick "╪" "+" lineart DoubleLine DoubleLine SingleLine SingleLine = pick "╫" "++" lineart _ _ _ _ = const mempty -- | Add the second table below the first, discarding its column headings. concatTables :: Properties -> Table rh ch a -> Table rh ch2 a -> Table rh ch a concatTables prop (Table hLeft hTop dat) (Table hLeft' _ dat') = Table (Group prop [hLeft, hLeft']) hTop (dat ++ dat') hledger-lib-1.30/Text/Megaparsec/Custom.hs0000644000000000000000000003715014434445206016616 0ustar0000000000000000-- A bunch of megaparsec helpers for re-parsing etc. -- I think these are generic apart from the HledgerParseError name. {-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} -- new {-# LANGUAGE LambdaCase #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} -- new module Text.Megaparsec.Custom ( -- * Custom parse error types HledgerParseErrorData, HledgerParseErrors, -- * Failing with an arbitrary source position parseErrorAt, parseErrorAtRegion, -- * Re-parsing SourceExcerpt, getExcerptText, excerpt_, reparseExcerpt, -- * Pretty-printing custom parse errors customErrorBundlePretty, -- * "Final" parse errors FinalParseError, FinalParseError', FinalParseErrorBundle, FinalParseErrorBundle', -- * Constructing "final" parse errors finalError, finalFancyFailure, finalFail, finalCustomFailure, -- * Pretty-printing "final" parse errors finalErrorBundlePretty, attachSource, -- * Handling parse errors from include files with "final" parse errors parseIncludeFile, ) where import Control.Monad.Except (ExceptT, MonadError, catchError, throwError) import Control.Monad.State.Strict (StateT, evalStateT) import Control.Monad.Trans.Class (lift) import qualified Data.List.NonEmpty as NE import Data.Monoid (Alt(..)) import qualified Data.Set as S import Data.Text (Text) import Text.Megaparsec --- * Custom parse error types -- | Custom error data for hledger parsers. Specialised for a 'Text' parse stream. -- ReparseableTextParseErrorData ? data HledgerParseErrorData -- | Fail with a message at a specific source position interval. The -- interval must be contained within a single line. = ErrorFailAt Int -- Starting offset Int -- Ending offset String -- Error message -- | Re-throw parse errors obtained from the "re-parsing" of an excerpt -- of the source text. | ErrorReparsing (NE.NonEmpty (ParseError Text HledgerParseErrorData)) -- Source fragment parse errors deriving (Show, Eq, Ord) -- | A specialised version of ParseErrorBundle: -- a non-empty collection of hledger parse errors, -- equipped with PosState to help pretty-print them. -- Specialised for a 'Text' parse stream. type HledgerParseErrors = ParseErrorBundle Text HledgerParseErrorData -- We require an 'Ord' instance for 'CustomError' so that they may be -- stored in a 'Set'. The actual instance is inconsequential, so we just -- derive it, but the derived instance requires an (orphan) instance for -- 'ParseError'. Hopefully this does not cause any trouble. 
deriving instance Ord (ParseError Text HledgerParseErrorData) -- Note: the pretty-printing of our 'HledgerParseErrorData' type is only partally -- defined in its 'ShowErrorComponent' instance; we perform additional -- adjustments in 'customErrorBundlePretty'. instance ShowErrorComponent HledgerParseErrorData where showErrorComponent (ErrorFailAt _ _ errMsg) = errMsg showErrorComponent (ErrorReparsing _) = "" -- dummy value errorComponentLen (ErrorFailAt startOffset endOffset _) = endOffset - startOffset errorComponentLen (ErrorReparsing _) = 1 -- dummy value --- * Failing with an arbitrary source position -- | Fail at a specific source position, given by the raw offset from the -- start of the input stream (the number of tokens processed at that -- point). parseErrorAt :: Int -> String -> HledgerParseErrorData parseErrorAt offset = ErrorFailAt offset (offset+1) -- | Fail at a specific source interval, given by the raw offsets of its -- endpoints from the start of the input stream (the numbers of tokens -- processed at those points). -- -- Note that care must be taken to ensure that the specified interval does -- not span multiple lines of the input source. This will not be checked. parseErrorAtRegion :: Int -- ^ Start offset -> Int -- ^ End end offset -> String -- ^ Error message -> HledgerParseErrorData parseErrorAtRegion startOffset endOffset msg = if startOffset < endOffset then ErrorFailAt startOffset endOffset msg' else ErrorFailAt startOffset (startOffset+1) msg' where msg' = "\n" ++ msg --- * Re-parsing -- | A fragment of source suitable for "re-parsing". The purpose of this -- data type is to preserve the content and source position of the excerpt -- so that parse errors raised during "re-parsing" may properly reference -- the original source. data SourceExcerpt = SourceExcerpt Int -- Offset of beginning of excerpt Text -- Fragment of source file -- | Get the raw text of a source excerpt. getExcerptText :: SourceExcerpt -> Text getExcerptText (SourceExcerpt _ txt) = txt -- | 'excerpt_ p' applies the given parser 'p' and extracts the portion of -- the source consumed by 'p', along with the source position of this -- portion. This is the only way to create a source excerpt suitable for -- "re-parsing" by 'reparseExcerpt'. -- This function could be extended to return the result of 'p', but we don't -- currently need this. excerpt_ :: MonadParsec HledgerParseErrorData Text m => m a -> m SourceExcerpt excerpt_ p = do offset <- getOffset (!txt, _) <- match p pure $ SourceExcerpt offset txt -- | 'reparseExcerpt s p' "re-parses" the source excerpt 's' using the -- parser 'p'. Parse errors raised by 'p' will be re-thrown at the source -- position of the source excerpt. -- -- In order for the correct source file to be displayed when re-throwing -- parse errors, we must ensure that the source file during the use of -- 'reparseExcerpt s p' is the same as that during the use of 'excerpt_' -- that generated the source excerpt 's'. However, we can usually expect -- this condition to be satisfied because, at the time of writing, the -- only changes of source file in the codebase take place through include -- files, and the parser for include files neither accepts nor returns -- 'SourceExcerpt's. 
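-- An illustrative sketch (not part of the original module): capture the rest of
-- the current line as a SourceExcerpt now, so that 'reparseExcerpt' below can
-- re-parse it later with errors reported at the original source position.
-- 'restOfLineExcerpt' is a hypothetical name used only for this example.
restOfLineExcerpt :: MonadParsec HledgerParseErrorData Text m => m SourceExcerpt
restOfLineExcerpt = excerpt_ $ takeWhileP Nothing (/= '\n')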
reparseExcerpt :: Monad m => SourceExcerpt -> ParsecT HledgerParseErrorData Text m a -> ParsecT HledgerParseErrorData Text m a reparseExcerpt (SourceExcerpt offset txt) p = do (_, res) <- lift $ runParserT' p (offsetInitialState offset txt) case res of Right result -> pure result Left errBundle -> customFailure $ ErrorReparsing $ bundleErrors errBundle where offsetInitialState :: Int -> s -> #if MIN_VERSION_megaparsec(8,0,0) State s e #else State s #endif offsetInitialState initialOffset s = State { stateInput = s , stateOffset = initialOffset , statePosState = PosState { pstateInput = s , pstateOffset = initialOffset , pstateSourcePos = initialPos "" , pstateTabWidth = defaultTabWidth , pstateLinePrefix = "" } #if MIN_VERSION_megaparsec(8,0,0) , stateParseErrors = [] #endif } --- * Pretty-printing custom parse errors -- | Pretty-print our custom parse errors. It is necessary to use this -- instead of 'errorBundlePretty' when custom parse errors are thrown. -- -- This function intercepts our custom parse errors and applies final -- adjustments ('finalizeCustomError') before passing them to -- 'errorBundlePretty'. These adjustments are part of the implementation -- of the behaviour of our custom parse errors. -- -- Note: We must ensure that the offset of the 'PosState' of the provided -- 'ParseErrorBundle' is no larger than the offset specified by a -- 'ErrorFailAt' constructor. This is guaranteed if this offset is set to -- 0 (that is, the beginning of the source file), which is the -- case for 'ParseErrorBundle's returned from 'runParserT'. customErrorBundlePretty :: HledgerParseErrors -> String customErrorBundlePretty errBundle = let errBundle' = errBundle { bundleErrors = NE.sortWith errorOffset $ -- megaparsec requires that the list of errors be sorted by their offsets bundleErrors errBundle >>= finalizeCustomError } in errorBundlePretty errBundle' where finalizeCustomError :: ParseError Text HledgerParseErrorData -> NE.NonEmpty (ParseError Text HledgerParseErrorData) finalizeCustomError err = case findCustomError err of Nothing -> pure err Just errFailAt@(ErrorFailAt startOffset _ _) -> -- Adjust the offset pure $ FancyError startOffset $ S.singleton $ ErrorCustom errFailAt Just (ErrorReparsing errs) -> -- Extract and finalize the inner errors errs >>= finalizeCustomError -- If any custom errors are present, arbitrarily take the first one -- (since only one custom error should be used at a time). findCustomError :: ParseError Text HledgerParseErrorData -> Maybe HledgerParseErrorData findCustomError err = case err of FancyError _ errSet -> finds (\case {ErrorCustom e -> Just e; _ -> Nothing}) errSet _ -> Nothing finds :: (Foldable t) => (a -> Maybe b) -> t a -> Maybe b finds f = getAlt . foldMap (Alt . f) --- * "Final" parse errors -- -- | A type representing "final" parse errors that cannot be backtracked -- from and are guaranteed to halt parsing. The anti-backtracking -- behaviour is implemented by an 'ExceptT' layer in the parser's monad -- stack, using this type as the 'ExceptT' error type. -- -- We have three goals for this type: -- (1) it should be possible to convert any parse error into a "final" -- parse error, -- (2) it should be possible to take a parse error thrown from an include -- file and re-throw it in the parent file, and -- (3) the pretty-printing of "final" parse errors should be consistent -- with that of ordinary parse errors, but should also report a stack of -- files for errors thrown from include files. 
-- -- In order to pretty-print a "final" parse error (goal 3), it must be -- bundled with include filepaths and its full source text. When a "final" -- parse error is thrown from within a parser, we do not have access to -- the full source, so we must hold the parse error until it can be joined -- with its source (and include filepaths, if it was thrown from an -- include file) by the parser's caller. -- -- A parse error with include filepaths and its full source text is -- represented by the 'FinalParseErrorBundle' type, while a parse error in -- need of either include filepaths, full source text, or both is -- represented by the 'FinalParseError' type. data FinalParseError' e -- a parse error thrown as a "final" parse error = FinalError (ParseError Text e) -- a parse error obtained from running a parser, e.g. using 'runParserT' | FinalBundle (ParseErrorBundle Text e) -- a parse error thrown from an include file | FinalBundleWithStack (FinalParseErrorBundle' e) deriving (Show) type FinalParseError = FinalParseError' HledgerParseErrorData -- We need a 'Monoid' instance for 'FinalParseError' so that 'ExceptT -- FinalParseError m' is an instance of Alternative and MonadPlus, which -- is needed to use some parser combinators, e.g. 'many'. -- -- This monoid instance simply takes the first (left-most) error. instance Semigroup (FinalParseError' e) where e <> _ = e instance Monoid (FinalParseError' e) where mempty = FinalError $ FancyError 0 $ S.singleton (ErrorFail "default parse error") mappend = (<>) -- | A type bundling a 'ParseError' with its full source text, filepath, -- and stack of include files. Suitable for pretty-printing. -- -- Megaparsec's 'ParseErrorBundle' type already bundles a parse error with -- its full source text and filepath, so we just add a stack of include -- files. data FinalParseErrorBundle' e = FinalParseErrorBundle' { finalErrorBundle :: ParseErrorBundle Text e , includeFileStack :: [FilePath] } deriving (Show) type FinalParseErrorBundle = FinalParseErrorBundle' HledgerParseErrorData --- * Constructing and throwing final parse errors -- | Convert a "regular" parse error into a "final" parse error. finalError :: ParseError Text e -> FinalParseError' e finalError = FinalError -- | Like megaparsec's 'fancyFailure', but as a "final" parse error. finalFancyFailure :: (MonadParsec e s m, MonadError (FinalParseError' e) m) => S.Set (ErrorFancy e) -> m a finalFancyFailure errSet = do offset <- getOffset throwError $ FinalError $ FancyError offset errSet -- | Like 'fail', but as a "final" parse error. finalFail :: (MonadParsec e s m, MonadError (FinalParseError' e) m) => String -> m a finalFail = finalFancyFailure . S.singleton . ErrorFail -- | Like megaparsec's 'customFailure', but as a "final" parse error. finalCustomFailure :: (MonadParsec e s m, MonadError (FinalParseError' e) m) => e -> m a finalCustomFailure = finalFancyFailure . S.singleton . ErrorCustom --- * Pretty-printing "final" parse errors -- | Pretty-print a "final" parse error: print the stack of include files, -- then apply the pretty-printer for parse error bundles. Note that -- 'attachSource' must be used on a "final" parse error before it can be -- pretty-printed. 
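-- An illustrative sketch (not part of the original module): how a caller might
-- turn a "final" parse error obtained from runParserT into a printable message,
-- using 'attachSource' and 'finalErrorBundlePretty' below. 'showFinalError' is
-- a hypothetical name used only for this example.
showFinalError :: FilePath -> Text -> FinalParseError -> String
showFinalError path src = finalErrorBundlePretty . attachSource path src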
finalErrorBundlePretty :: FinalParseErrorBundle' HledgerParseErrorData -> String finalErrorBundlePretty bundle = concatMap showIncludeFilepath (includeFileStack bundle) <> customErrorBundlePretty (finalErrorBundle bundle) where showIncludeFilepath path = "in file included from " <> path <> ",\n" -- | Supply a filepath and source text to a "final" parse error so that it -- can be pretty-printed. You must ensure that you provide the appropriate -- source text and filepath. attachSource :: FilePath -> Text -> FinalParseError' e -> FinalParseErrorBundle' e attachSource filePath sourceText finalParseError = case finalParseError of -- A parse error thrown directly with the 'FinalError' constructor -- requires both source and filepath. FinalError err -> let bundle = ParseErrorBundle { bundleErrors = err NE.:| [] , bundlePosState = initialPosState filePath sourceText } in FinalParseErrorBundle' { finalErrorBundle = bundle , includeFileStack = [] } -- A 'ParseErrorBundle' already has the appropriate source and filepath -- and so needs neither. FinalBundle peBundle -> FinalParseErrorBundle' { finalErrorBundle = peBundle , includeFileStack = [] } -- A parse error from a 'FinalParseErrorBundle' was thrown from an -- include file, so we add the filepath to the stack. FinalBundleWithStack fpeBundle -> fpeBundle { includeFileStack = filePath : includeFileStack fpeBundle } --- * Handling parse errors from include files with "final" parse errors -- | Parse a file with the given parser and initial state, discarding the -- final state and re-throwing any parse errors as "final" parse errors. parseIncludeFile :: Monad m => StateT st (ParsecT HledgerParseErrorData Text (ExceptT FinalParseError m)) a -> st -> FilePath -> Text -> StateT st (ParsecT HledgerParseErrorData Text (ExceptT FinalParseError m)) a parseIncludeFile parser initialState filepath text = catchError parser' handler where parser' = do eResult <- lift $ lift $ runParserT (evalStateT parser initialState) filepath text case eResult of Left parseErrorBundle -> throwError $ FinalBundle parseErrorBundle Right result -> pure result -- Attach source and filepath of the include file to its parse errors handler e = throwError $ FinalBundleWithStack $ attachSource filepath text e --- * Helpers -- Like megaparsec's 'initialState', but instead for 'PosState'. Used when -- constructing 'ParseErrorBundle's. The values for "tab width" and "line -- prefix" are taken from 'initialState'. initialPosState :: FilePath -> Text -> PosState Text initialPosState filePath sourceText = PosState { pstateInput = sourceText , pstateOffset = 0 , pstateSourcePos = initialPos filePath , pstateTabWidth = defaultTabWidth , pstateLinePrefix = "" } hledger-lib-1.30/Text/WideString.hs0000644000000000000000000000211614434445206015346 0ustar0000000000000000-- | Calculate the width of String and Text, being aware of wide characters. module Text.WideString ( -- * Text Builders which keep track of length WideBuilder(..), wbUnpack, wbToText, wbFromText ) where import Data.Text (Text) import qualified Data.Text.Lazy as TL import qualified Data.Text.Lazy.Builder as TB import Text.DocLayout (realLength) -- | Helper for constructing Builders while keeping track of text width. data WideBuilder = WideBuilder { wbBuilder :: !TB.Builder , wbWidth :: !Int } deriving (Show) instance Semigroup WideBuilder where WideBuilder x i <> WideBuilder y j = WideBuilder (x <> y) (i + j) instance Monoid WideBuilder where mempty = WideBuilder mempty 0 -- | Convert a WideBuilder to a strict Text. 
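-- An illustrative note (not part of the original module): WideBuilders
-- concatenate like ordinary Builders while also tracking on-screen width,
-- counting wide (eg CJK) characters as two columns. For example, assuming an
-- OverloadedStrings context for the Text literals:
--
--   wbWidth (wbFromText "りんご" <> wbFromText ": 3")   -- 6 + 3 = 9
--   wbToText (wbFromText "りんご" <> wbFromText ": 3")  -- "りんご: 3"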
wbToText :: WideBuilder -> Text wbToText = TL.toStrict . TB.toLazyText . wbBuilder -- | Convert a strict Text to a WideBuilder. wbFromText :: Text -> WideBuilder wbFromText t = WideBuilder (TB.fromText t) (realLength t) -- | Convert a WideBuilder to a String. wbUnpack :: WideBuilder -> String wbUnpack = TL.unpack . TB.toLazyText . wbBuilder hledger-lib-1.30/test/unittest.hs0000644000000000000000000000072214434445206015202 0ustar0000000000000000{- Run the hledger-lib package's unit tests using the tasty test runner. -} -- package-qualified import to avoid cabal missing-home-modules warning (and double-building ?) {-# LANGUAGE PackageImports #-} import "hledger-lib" Hledger (tests_Hledger) import System.Environment (setEnv) import Test.Tasty (defaultMain) main :: IO () main = do setEnv "TASTY_HIDE_SUCCESSES" "true" setEnv "TASTY_ANSI_TRICKS" "false" -- helps the above defaultMain tests_Hledger hledger-lib-1.30/test/doctests.hs0000644000000000000000000000373114434445206015156 0ustar0000000000000000{- Run doctests in Hledger source files under the current directory (./Hledger.hs, ./Hledger/**, ./Text/**) using the doctest runner. https://github.com/sol/doctest#readme Arguments are case-insensitive file path substrings, to limit the files searched. --verbose shows files being searched for doctests and progress while running. --slow reloads ghci between each test (https://github.com/sol/doctest#a-note-on-performance). Eg, in hledger source dir: $ make ghci-doctest, :main [--verbose] [--slow] [CIFILEPATHSUBSTRINGS] or: $ stack test hledger-lib:test:doctest --test-arguments="--verbose --slow [CIFILEPATHSUBSTRINGS]" -} -- This file can't be called doctest.hs ("File name does not match module name") {-# LANGUAGE PackageImports #-} import Control.Monad import Data.Char import Data.List import System.Environment import "Glob" System.FilePath.Glob import Test.DocTest main :: IO () main = do args <- getArgs let verbose = "--verbose" `elem` args slow = "--slow" `elem` args pats = filter (not . ("-" `isPrefixOf`)) args -- find source files sourcefiles <- (filter (not . isInfixOf "/.") . concat) <$> sequence [ glob "Hledger.hs" ,glob "Hledger/**/*.hs" ,glob "Text/**/*.hs" ] -- filter by patterns (case insensitive infix substring match) let fs | null pats = sourcefiles | otherwise = [f | f <- sourcefiles, let f' = map toLower f, any (`isInfixOf` f') pats'] where pats' = map (map toLower) pats fslen = length fs if (null fs) then do putStrLn $ "No file paths found matching: " ++ unwords pats else do putStrLn $ "Loading and searching for doctests in " ++ show fslen ++ if fslen > 1 then " files, plus any files they import:" else " file, plus any files it imports:" when verbose $ putStrLn $ unwords fs doctest $ (if verbose then ("--verbose" :) else id) $ -- doctest >= 0.15.0 (if slow then id else ("--fast" :)) $ -- doctest >= 0.11.4 fs hledger-lib-1.30/CHANGES.md0000644000000000000000000017710214436245522013412 0ustar0000000000000000 Internal/api/developer-ish changes in the hledger-lib (and hledger) packages. For user-visible changes, see the hledger package changelog. # 1.30 2023-06-01 Breaking changes - dropped: Hledger.Data.RawOptions.inRawOpts Misc. 
changes - added more terminal size, ANSI style/color helpers in Hledger.Utils.IO (and therefore Hledger and Hledger.Cli.Script): getTerminalHeightWidth getTerminalHeight getTerminalWidth bold' faint' black' red' green' yellow' blue' magenta' cyan' white' brightBlack' brightRed' brightGreen' brightYellow' brightBlue' brightMagenta' brightCyan' brightWhite' rgb' multicol expandGlob sortByModTime # 1.29.2 2023-04-07 # 1.29.1 2023-03-16 - Hledger.Utils.String: added: strip1Char stripBy strip1By - Allow building with GHC 9.6.1; add base-compat (#2011) # 1.29 2023-03-11 - added terminal colour detection helpers: terminalIsLight terminalLightness terminalFgColor terminalBgColor - Hledger.Data.RawOptions: add unsetboolopt - add journalMarkRedundantCosts to help with balancing - journalInferCosts -> journalInferCostsFromEquity - `BalancingOpts{infer_transaction_prices_ -> infer_balancing_costs_}` - Hledger.Data.Balancing: inferBalancingPrices -> transactionInferBalancingCosts - Hledger.Data.Balancing: inferBalancingAmount -> transactionInferBalancingAmount - Hledger.Data.Journal: transactionAddPricesFromEquity -> transactionInferCostsFromEquity - Hledger.Data.Journal: journalAddPricesFromEquity -> journalInferCosts - Hledger.Data.Dates: intervalStartBefore -> intervalBoundaryBefore - Hledger.Read.Common: cleaned up some amount parsers; describe Ledger lot notation ``` amountpwithmultiplier -> amountp' amountpnolotpricesp -> amountnobasisp amountwithoutpricep -> simpleamountp priceamountp -> costp ``` - depend on text-ansi # 1.28 2022-12-01 - Hledger.Utils.Debug's debug logging helpers have been unified. The "trace or log" functions log to stderr by default, or to a file if ",logging" is appended to the program name (using withProgName). The debug log file is PROGNAME.log (changed from debug.log). - Moved from Hledger.Utils.Debug to Hledger.Utils.Parse: traceParse traceParseAt dbgparse - Moved from Hledger.Utils.Debug to Hledger.Utils.Print: pshow pshow' pprint pprint' colorOption useColorOnStdout useColorOnStderr outputFileOption hasOutputFile - Rename Hledger.Utils.Print -> Hledger.Utils.IO, consolidate utils there. - Hledger.Utils cleaned up. - Hledger.Data.Amount: showMixedAmountOneLine now also shows costs. Note that different costs are kept separate in amount arithmetic. - Hledger.Read.Common: rename/add amount parsing helpers. added: parseamount parseamount' parsemixedamount parsemixedamount' removed: amountp' mamountp' - Hledger.Utils.Parse: export customErrorBundlePretty, for pretty-printing hledger parse errors. - Support megaparsec 9.3. (Felix Yan) - Support GHC 9.4. - Update cabal files to match hpack 0.35/stack 2.9 # 1.27 2022-09-01 Breaking changes - Support for GHC 8.6 and 8.8 has been dropped. hledger now requires GHC 8.10 or newer. - Hledger.Data.Amount: `amount` has been dropped; use `nullamt` instead. - journal*AccountQuery functions have been dropped; use a type: query instead. cbcsubreportquery no longer takes Journal as an argument. (#1921) Misc. changes - Hledger.Utils.Debug now re-exports Debug.Breakpoint from the breakpoint library, so that breakpoint's helpers can be used easily during development. - Hledger.Utils.Debug: dlog has been replaced by more reliable functions for debug-logging to a file (useful for debugging TUI apps like hledger-ui): dlogTrace dlogTraceAt dlogAt dlog0 dlog1 dlog2 dlog3 dlog4 dlog5 dlog6 dlog7 dlog8 dlog9 - Hledger.Utils.Debug: pprint' and pshow' have been added, forcing monochrome output. 
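For example, a minimal sketch of the new monochrome variants (usage only; signatures and imports assumed, not taken from this changelog):

```haskell
-- assuming pprint and pprint' are in scope, eg from Hledger.Utils.Debug
debugDump :: Show a => a -> IO ()
debugDump x = do
  pprint  x   -- pretty output, possibly colourised on a terminal
  pprint' x   -- the new variant: always monochrome
```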
- Hledger.Utils.String: add quoteForCommandLine - Hledger.Data.Errors: export makeBalanceAssertionErrorExcerpt - Hledger.Utils.Parse: export HledgerParseErrors - Debug logging from journalFilePath and the include directive will now show "(unknown)" instead of an empty string. # 1.26.1 2022-07-11 - require safe 0.3.19+ to avoid deprecation warning # 1.26 2022-06-04 Breaking changes - readJournal, readJournalFile, readJournalFiles now return `ExceptT String IO a` instead of `IO (Either String a)`. Internally, this increases composability and avoids some ugly case handling. It means that these must now be evaluated with `runExceptT`. That can be imported from `Control.Monad.Except` in the `mtl` package, but `Hledger.Read` also re-exports it for convenience. New variants readJournal', readJournalFiles', readJournalFile' are also provided; these are like the old functions but more convenient, assuming default input options and needing one less argument. (Stephen Morgan) - parseAndFinaliseJournal' (a variant of parseAndFinaliseJournal) has been removed. In the unlikely event you needed it in your code, you can replace: ```haskell parseAndFinaliseJournal' parser iopts fp t ``` with: ```haskell initialiseAndParseJournal parser iopts fp t >>= liftEither . journalApplyAliases (aliasesFromOpts iopts) >>= journalFinalise iopts fp t ``` - Some parsers have been generalised from JournalParser to TextParser. (Stephen Morgan) Misc. changes - Allow doclayout 0.4. - Our doctests now run with GHC 9.2+ only, to avoid doctest issues. - Hledger.Data.JournalChecks: some Journal checks have been moved and renamed: journalCheckAccounts, journalCheckCommodities, journalCheckPayees - Hledger.Data.Errors: new error formatting helpers makeTransactionErrorExcerpt, makePostingErrorExcerpt, transactionFindPostingIndex - HledgerParseErrors is a new type alias for our parse errors. CustomErr has been renamed to HledgerParseErrorData. - Hledger.Query: added matchesQuery, queryIsCode, queryIsTransactionRelated - Improve ergonomics of SmartDate constructors. (Stephen Morgan) - Hledger.Utils: Add a helper function numDigitsInt to get the number of digits in an integer, which has a surprising number of ways to get it wrong. ([#1813](https://github.com/simonmichael/hledger/issues/1813) (Stephen Morgan) # 1.25 2022-03-04 - hledger-lib now builds with GHC 9.2 and latest deps. ([#1774](https://github.com/simonmichael/hledger/issues/1774) - Journal has a new jaccounttypes map. The journalAccountType lookup function makes it easy to check an account's type. The journalTags and journalInheritedTags functions look up an account's tags. Functions like journalFilterPostings and journalFilterTransactions, and new matching functions matchesAccountExtra, matchesPostingExtra and matchesTransactionExtra, use these to allow more powerful matching that is aware of account types and tags. - Journal has a new jdeclaredaccounttags field for easy lookup of account tags. Query.matchesTaggedAccount is a tag-aware version of matchesAccount. - Some account name functions have moved from Hledger.Data.Posting to Hledger.Data.AccountName: accountNamePostingType, accountNameWithPostingType, accountNameWithoutPostingType, joinAccountNames, concatAccountNames, accountNameApplyAliases, accountNameApplyAliasesMemo. - Renamed: CommodityLayout to Layout. # 1.24.1 2021-12-10 Improvements - Added: filterQueryOrNotQuery. 
# 1.24 2021-12-01 Improvements - The Semigroup instance of PeriodicReportRow and PeriodicReport now preserves the first prrName, rather than the second. (Stephen Morgan) - PeriodicReport and PeriodicReportRow now have Bifunctor instances. (Stephen Morgan) - Move posting rendering functions into Hledger.Data.Posting. This produces slightly different output for showPosting, in particular it no longer displays the transaction date. However, this has been marked as ‘for debugging only’ for a while. (Stephen Morgan) - Drop postingDateOrDate2, transactionDateOrDate2; rename whichDateFromOpts to whichDate. (#1731) - Added new helper functions journalValueAndFilterPostings(With) to make valuation and filtration less error prone. (Stephen Morgan) - Avoid deprecation warnings with safe 0.3.18+. (Stephen Morgan) - Drop base-compat-batteries dependency. (Stephen Morgan) - Allow megaparsec 9.2. # 1.23 2021-09-21 - Require base >=4.11, prevent red squares on Hackage's build matrix. Much code cleanup and reorganisation, such as: - Introduce lenses for many types. (Stephen Morgan) - The now-obsolete normaliseMixedAmount and normaliseMixedAmountSquashPricesForDisplay functions have been dropped. (Stephen Morgan) - GenericSourcePos has been dropped, replaced by either SourcePos or (SourcePos, SourcePos), simplifying module structure. (Stephen Morgan) - Functions related to balancing (both transaction balancing and journal balancing) have been moved to Hledger.Data.Balancing, reducing module size and reducing the risk of import cycles. (Stephen Morgan) - `ReportOptions{infer_value_}` has been renamed to `infer_prices_`, for more consistency with the corresponding CLI flag. And `BalancingOpts{infer_prices_}` is now `infer_transaction_prices_`. - JournalParser and ErroringJournalParser have moved to Hledger.Data.Journal. (Stephen Morgan) - MixedAmounts now have a more predictable Ord instance / sort order. They are compared in each commodity in turn, with alphabetically-first commodity symbols being most significant. Missing commodities are assumed to be zero. As a consequence, all the ways of representing zero with a MixedAmount ([], [A 0], [A 0, B 0, ...]) are now Eq-ual (==), whereas before they were not. We have not been able to find anything broken by this change. ([#1563](https://github.com/simonmichael/hledger/issues/1563), [#1564](https://github.com/simonmichael/hledger/issues/1564), Stephen Morgan) - HUnit's testCase and testGroup are now used directly instead of having test and tests aliases. (Stephen Morgan) - The codebase now passes many hlint checks - Dropped modules: Hledger.Utils.Color, Hledger.Data.Commodity, Hledger.Utils.UTF8IOCompat, Hledger.Utils.Tree module. (Stephen Morgan) - Drop the deprecated old-time lib. A small number type signatures have changed: journalSetLastReadTime, maybeFileModificationTime and Journal now use POSIXTime instead of ClockTime. Hledger.Cli.Utils.utcTimeToClockTime has been removed, as it is now equivalent to utcTimeToPOSIXSeconds from Data.Time.Clock.POSIX. To get the current system time, you should now use getPOSIXTime from Data.Time.Clock.POSIX instead of getClockTime. ([#1650](https://github.com/simonmichael/hledger/issues/1650), Stephen Morgan) - modifyTransactions now takes a Map of commodity styles, and will style amounts according to that argument. journalAddForecast and journalTransform now return an Either String Journal. 
(Stephen Morgan) This improves efficiency, as we no longer have to restyle all amounts in the journal after generating auto postings or periodic transactions. Changing the return type of journalAddForecast and journalTransform reduces partiality. To get the previous behaviour for modifyTransaction, use modifyTransaction mempty. - Refactor journalFinalise to clarify flow. (Stephen Morgan) The only semantic difference is that we now apply journalApplyCommodityStyles before running journalCheckAccountsDeclared and journalCheckCommoditiesDeclared. - Introduce lenses for ReportOpts and ReportSpec. (Stephen Morgan) - Rename the fields of ReportSpec. (Stephen Morgan) This is done to be more consistent with future field naming conventions, and to make automatic generation of lenses simpler. See discussion in [#1545](https://github.com/simonmichael/hledger/issues/1545). rsOpts -> _rsReportOpts rsToday -> _rsDay rsQuery -> _rsQuery rsQueryOpts -> _rsQueryOpts - Remove aismultiplier from Amount. (Stephen Morgan) In Amount, aismultiplier is a boolean flag that will always be False, except for in TMPostingRules, where it indicates whether the posting rule is a multiplier. It is therefore unnecessary in the vast majority of cases. This posting pulls this flag out of Amount and puts it into TMPostingRule, so it is only kept around when necessary. This changes the parsing of journals somewhat. Previously you could include an * before an amount anywhere in a Journal, and it would happily parse and set the aismultiplier flag true. This will now fail with a parse error: * is now only acceptable before an amount within an auto posting rule. Any usage of the library in which the aismultiplier field is read or set should be removed. If you truly need its functionality, you should switch to using TMPostingRule. This changes the JSON output of Amount, as it will no longer include aismultiplier. - For accountTransactionsReport, generate the overall reportq from the ReportSpec, rather than being supplied as a separate option. (Stephen Morgan) This is the same approach used by the other reports, e.g. EntryReport, PostingReport, MultiBalanceReport. This reduces code duplication, as previously the reportq had to be separately tweaked in each of 5 different places. If you call accountTransactionreport, there is no need to separately derive the report query. - Remove unused TransactionReport. Move the useful utility functions to AccountTransactionsReport. (Stephen Morgan) If you use transactionsReport, you should either use entryReport if you don't require a running total, or using accountTransactionsReport with thisacctq as Any or None (depending on what you want included in the running total). - Some balance report types have been renamed for clarity and to sync with docs: ReportType -> BalanceCalculation ChangeReport -> CalcChange BudgetReport -> CalcBudget ValueChangeReport -> CalcValueChange BalanceType -> BalanceAccumulation PeriodChange -> PerPeriod CumulativeChange -> Cumulative HistoricalBalance -> Historical ReportOpts: reporttype_ -> balancecalc_ balancetype_ -> balanceaccum_ CompoundBalanceCommandSpec: cbctype -> cbcaccum Hledger.Reports.ReportOptions: balanceTypeOverride -> balanceAccumulationOverride # 1.22.2 2021-08-07 - forecast_ has moved from ReportOpts to InputOpts. (Stephen Morgan) - Generate forecast transactions at journal finalisation, rather than as a postprocessing step. 
This allows us to have a uniform procedure for balancing transactions, whether they are normal transactions or forecast transactions, including dealing with balance assignments, balance assertions, and auto postings. ([#1638](https://github.com/simonmichael/hledger/issues/1638), Stephen Morgan) # 1.22.1 2021-08-02 - Allow megaparsec 9.1 - journalEndDate's behaviour has been clarified, journalLastDay has been added. - transactionCheckBalanced is now exported. (#1596) # 1.22 2021-07-03 - GHC 9.0 is now officially supported, and GHC 8.0, 8.2, 8.4 are not; building hledger now requires GHC 8.6 or greater. - Added now-required lower bound on containers. (#1514) - Added useColor, colorOption helpers usable in pure code, eg for debug output. - Added a Show instance for AmountDisplayOpts and WideBuilder, for debug logging. Many internal refactorings/improvements/optimisations by Stephen Morgan, including: - Don't infer a txn price with same-sign amounts. (#1551) - Clean up valuation functions, and make clear which to use where. (#1560) - Replace journalSelectingAmountFromOpts with journalApplyValuationFromOpts. This also has the effect of allowing valuation in more reports, for example the transactionReport. - Refactor to eliminate use of printf. - Remove unused String, Text utility functions. - Replace concat(Top|Bottom)Padded with textConcat(Top|Bottom)Padded. - Export Text.Tabular from Text.Tabular.AsciiWide, clean up import lists. - When matching an account query against a posting, don't try to match against the same posting twice, in cases when poriginal is Nothing. - Create mixedAmountApplyValuationAfterSumFromOptsWith for doing any valuation needed after summing amounts. - Create journalApplyValuationFromOpts. This does costing and valuation on a journal, and is meant to replace most direct calls of costing and valuation. The exception is for reports which require amounts to be summed before valuation is applied, for example a historical balance report with --value=end. - Remove unused (amount|mixedAmount|posting|transaction)ApplyCostValuation functions. - Remove unnecessary normalisedMixedAmount. - Remove `showAmounts*B` functions, replacing them entirely with `showMixedAmount*B` functions. - Pull "show-costs" option used by the Close command up into ReporOpts. - Add more efficient toEncoding for custom ToJSON declarations. - Fix ledgerDateSpan, so that it considers both transaction and posting dates. (#772) - Move reportPeriodName to Hledger.Reports.ReportOptions, use it for HTML and CSV output for compound balance reports. - Simplify the JSON representation of AmountPrecision. It now uses the same JSON representation as Maybe Word8. This means that the JSON serialisation is now broadly compatible with that used before the commit f6fa76bba7530af3be825445a1097ae42498b1cd, differing only in how it handles numbers outside Word8 and that it can now produce null for NaturalPrecision. - A number of AccountName and Journal functions which are supposed to produce unique sorted results now use Sets internally to be slightly more efficient. There is also a new function journalCommodities. - More efficiently check whether Amounts are or appear to be zero. Comparing two Quantity (either with == or compare) does a lot of normalisation (calling roundMax) which is unnecessary if we're comparing to zero. Do things more directly to save work. 
For `reg -f examples/10000x10000x10.journal`, this results in - A 12% reduction in heap allocations, from 70GB to 62GB - A 14% reduction in (profiled) time, from 79s to 70s Results for bal -f examples/10000x10000x10.journal are of the same order of magnitude. - In sorting account names, perform lookups on HashSets and HashMaps, rather than lists. This is probably not an enormous performance sink in real situations, but it takes a huge amount of time and memory in our benchmarks (specifically 10000x10000x10.journal). For `bal -f examples/10000x10000x10.journal`, this results in - A 23% reduction in heap allocation, from 27GiB to 21GiB - A 33% reduction in (profiled) time running, from 26.5s to 17.9s - Minor refactor, using foldMap instead of asum . map . toList. - Do not call showAmount twice for every posting. For print -f examples/10000x10000x10.journal, this results in a 7.7% reduction in heap allocations, from 7.6GB to 7.1GB. - Some efficiency improvements in register reports. Use renderRow interface for Register report. For `reg -f examples/10000x10000x10.journal`, this results in: - Heap allocations decreasing by 55%, from 68.6GB to 31.2GB - Resident memory decreasing by 75%, from 254GB to 65GB - Total (profiled) time decreasing by 55%, from 37s to 20s - Split showMixedAmountB into showMixedAmountB and showAmountsB, the former being a simple wrapper around the latter. This removes the need for the showNormalised option, as showMixedAmountB will always showNormalised and showAmountsB will never do so. - Change internal representation of MixedAmount to use a strict Map instead of a list of Amounts. No longer export Mixed constructor, to keep API clean. (If you really need it, you can import it directly from Hledger.Data.Types). We also ensure the JSON representation of MixedAmount doesn't change: it is stored as a normalised list of Amounts. This commit improves performance. Here are some indicative results: hledger reg -f examples/10000x1000x10.journal - Maximum residency decreases from 65MB to 60MB (8% decrease) - Total memory in use decreases from 178MiB to 157MiB (12% decrease) hledger reg -f examples/10000x10000x10.journal - Maximum residency decreases from 69MB to 60MB (13% decrease) - Total memory in use decreases from 198MiB to 153MiB (23% decrease) hledger bal -f examples/10000x1000x10.journal - Total heap usage decreases from 6.4GB to 6.0GB (6% decrease) - Total memory in use decreases from 178MiB to 153MiB (14% decrease) hledger bal -f examples/10000x10000x10.journal - Total heap usage decreases from 7.3GB to 6.9GB (5% decrease) - Total memory in use decreases from 196MiB to 185MiB (5% decrease) hledger bal -M -f examples/10000x1000x10.journal - Total heap usage decreases from 16.8GB to 10.6GB (47% decrease) - Total time decreases from 14.3s to 12.0s (16% decrease) hledger bal -M -f examples/10000x10000x10.journal - Total heap usage decreases from 108GB to 48GB (56% decrease) - Total time decreases from 62s to 41s (33% decrease) If you never directly use the constructor Mixed or pattern match against it then you don't need to make any changes. If you do, then do the following: - If you really care about the individual Amounts and never normalise your MixedAmount (for example, just storing `Mixed amts` and then extracting `amts` as a pattern match, then use should switch to using [Amount]. This should just involve removing the `Mixed` constructor. 
- If you ever call `mixed`, `normaliseMixedAmount`, or do any sort of amount arithmetic (+), (-), then you should replace the constructor `Mixed` with the function `mixed`. To extract the list of Amounts, use the function `amounts`. - Any remaining calls to `normaliseMixedAmount` can be removed, as that is now the identity function. - Create a new API for MixedAmount arithmetic. This should supplant the old interface, which relied on the Num typeclass. MixedAmount did not have a very good Num instance. The only functions which were defined were fromInteger, (+), and negate. Furthermore, it was not law-abiding, as 0 + a /= a in general. Replacements for used functions are: 0 -> nullmixedamt / mempty (+) -> maPlus / (<>) (-) -> maMinus negate -> maNegate sum -> maSum sumStrict -> maSum Also creates some new constructors for MixedAmount: mixedAmount :: Amount -> MixedAmount maAddAmount :: MixedAmount -> Amount -> MixedAmount maAddAmounts :: MixedAmount -> [Amount] -> MixedAmount Add Semigroup and Monoid instances for MixedAmount. Ideally we would remove the Num instance entirely. The only change needed have nullmixedamt/mempty substitute for 0 without problems was to not squash prices in mixedAmount(Looks|Is)Zero. This is correct behaviour in any case. # 1.21 2021-03-10 - Building Hledger.Data.Journal no longer fails if the monad-extras package is installed. - Many parts of the hledger-lib and hledger APIs have become more Text-ified, expecting or returning Text instead of String, reducing hledger's time and resident memory requirements by roughly 10%. Some functions now use WideBuilder (a text "builder" which keeps track of width), to concatenate text more efficiently. There are some helpers for converting to and from WideBuilder (wbUnpack, wbToText..) showAmountB/showMixedAmountB are new amount-displaying functions taking an AmountDisplayOpts. These will probably replace the old show(Mixed)Amount* functions. (#1427, Stephen Morgan) - AtThen valuation is now implemented for all report types. amountApplyValuation now takes the posting date as an argument. (transaction/posting)ApplyValuation's valuation type and transaction/posting arguments have been reordered like amountApplyValuation's. (Stephen Morgan) - Amount, AmountPrice, AmountStyle, DigitGroupStyle fields are now strict. (Stephen Morgan) - Amount prices are now stored with their sign, so negative prices can be represented. (They seem to have always worked, but now the internal representation is more accurate.) (Stephen Morgan) - normaliseMixedAmount now combines Amounts with TotalPrices in the same commodity. (Stephen Morgan) - normaliseMixedAmount now uses a strict Map for combining amounts internally, closing a big space leak. (Stephen Morgan) - (multiply|divide)(Mixed)?Amount now also multiply or divide the TotalPrice if it is present, and the old (multiply|divide)(Mixed)?AmountAndPrice functions are removed. (Stephen Morgan) - (amount|mixedAmount)(Looks|Is)Zero functions now check whether both the quantity and the cost are zero. This is usually what you want, but if you do only want to check whether the quantity is zero, you can run mixedAmountStripPrices (or similar) before this. (Stephen Morgan) - commodityStylesFromAmounts now consumes the list immediately, reducing the maximum heap size per thread from ~850K to ~430K in a real-world register report. (Stephen Morgan) - *ApplyValuation functions take two less arguments, and *ApplyCostValuation functions have been added, performing both costing and valuation. 
(Stephen Morgan) - traceAtWith now has a level argument and works properly. - API changes include: ``` Hledger.Data.Amount: setAmountPrecision -> amountSetPrecision setFullPrecision -> amountSetFullPrecision setMixedAmountPrecision -> mixedAmountSetPrecision showMixed -> showMixedAmountB showMixedLines -> showMixedAmountLinesB -mixedAmountSetFullPrecision Hledger.Data.Journal: mapJournalTransactions -> journalMapTransactions mapJournalPostings -> journalMapPostings -mapTransactionPostings +journalPayeesUsed +journalPayeesDeclaredOrUsed Hledger.Data.Transaction: +transactionFile +transactionMapPostings Hledger.Data.Valuation: -valuationTypeIsCost -valuationTypeIsDefaultValue -ValuationType's AtDefault constructor Hledger.Query: +matchesDescription +matchesPayeeWIP Hledger.Utils.Text: +textConcatBottomPadded +wbToText +wbUnpack Text.Tabular.AsciiWide: alignCell -> textCell ``` # 1.20.4 2021-01-29 - See hledger. # 1.20.3 2021-01-14 - See hledger. # 1.20.2 2020-12-28 - Fix the info manuals' node structure. - Drop unused parsec dependency. # 1.20.1 2020-12-15 - renamed: updateReportSpecFromOpts -> updateReportSpec[With] # 1.20 2020-12-05 - added: journalApplyAliases, transactionApplyAliases, postingApplyAliases - a new more robust price lookup implementation, fgl library dropped (#1402) - Reverted a stripAnsi change in 1.19.1 that caused a 3x slowdown of amount rendering in terminal reports. (#1350) - Amount and table rendering has been improved, so that stripAnsi is no longer needed. This speeds up amount rendering in the terminal, speeding up some reports by 10% or more since 1.19. (Stephen Morgan) - global commodity display styles can now be set in InputOpts or Journal, overriding all others (declared or inferred). This is used by the import command and probably command-line options in future. - Journal keeps a new piece of parsing state, a decimal mark character, which can optionally be set to force the number format expected by all amount parsers. - Remove Empty Query constructor, which does nothing and has done so for a very long time. (Stephen Morgan) - In ReportOpts, store query terms term-by-term in a list in querystring_. (Stephen Morgan) This helps deal with tricky quoting issues, as we no longer have to make sure everything is quoted properly before merging it into a string. - Implement concat(Top|Bottom)Padded in terms of renderRow, allowing them to be width aware. (Stephen Morgan) - Expand Tabular.AsciiWide to allow multiline, custom-width, vertically/horizontally-aligned cells, and optional table borders. (Stephen Morgan) - Introduce showMixed*Unnormalised, eliminate most direct calls of strWidth. (Stephen Morgan) - showMixedAmountElided now makes better use of space, showing as many Amounts possible as long as they and the elision string fit within 32 characters. (Stephen Morgan) - Add Functor instance for CompoundPeriodicReport. (Stephen Morgan) - Generalise CBCSubreportSpec to allow more subreport control. (Stephen Morgan) - Export some MultiBalanceReport helper functions. (Stephen Morgan) - Make Default instances clearer, remove Default instance for Bool. (Stephen Morgan) - Many ReportOpts-related changes, such as the addition of ReportSpec, aimed at preventing runtime errors (from parsing: regexps, dates, format strings; from not having today's date set; etc.) ReportSpec holds a ReportOpts, the day of the report, and the Query generated from these. - StringFormat now takes an optional overline width, which is currently only used by defaultBalanceLineFormat. 
(Stephen Morgan) - quoteIfNeeded should not escape the backslashes in unicode code points. (Stephen Morgan) - Export OrdPlus and constructors. (Stephen Morgan) - Debug output now uses pretty-simple instead pretty-show. This hopefully gives overall nicer debug output (eg in colour), including for values which don't have Read-able Show output. This means that we can start removing custom Show instances that were a workaround for pretty-show. Eg account names in debug output no longer show their colons as underscores. Here's some old pretty-show output: CsvRules { rdirectives = [ ( "skip" , "1" ) ] , rcsvfieldindexes = [ ( "date" , 1 ) , ( "amount" , 2 ) ] , rassignments = [ ( "amount" , "%2" ) , ( "date" , "%1" ) ] , rconditionalblocks = [] } And the new pretty-simple output: CsvRules { rdirectives= [ ( "skip", "1" ) ] , rcsvfieldindexes= [ ( "date", 1 ), ( "amount", 2 ) ] , rassignments= [ ( "amount", "%2" ), ( "date", "%1" ) ] , rconditionalblocks= [] } We require pretty-simple 4.0.0.0 to get this compact output. It's a little less compact than pretty-show, but not too bad. Non-compact pretty-simple output would be: CsvRules { rdirectives= [ ( "skip" , "1B" ) ] , rcsvfieldindexes= [ ( "date" , 1 ) , ( "amount" , 2 ) ] , rassignments= [ ( "amount" , "%2" ) , ( "date" , "%1" ) ] , rconditionalblocks=[] } # 1.19.1 2020-09-07 - Allow megaparsec 9 - stripAnsi: correctly strip ansi sequences with no numbers/semicolons. (Stephen Morgan) - Added case-insensitive accountNameToAccountRegexCI, accountNameToAccountOnlyRegexCI, made the default account type queries case insensitive again. (#1341) # 1.19 2020-09-01 - Added a missing lower bound for aeson, making cabal installs more reliable. (#1268) - The Regex type alias has been replaced by the Regexp ADT, which contains both the compiled regular expression (so is guaranteed to be usable at runtime) and the original string (so can be serialised, printed, compared, etc.) A Regexp also knows whether is it case sensitive or case insensitive. The Hledger.Utils.Regex API has changed. (#1312, #1330). - Typeable and Data instances are no longer derived for hledger's data types; they were redundant/no longer needed. - NFData instances are no longer derived for hledger's data types. This speeds up a full build by roughly 7%. But it means we can't deep-evaluate hledger values, or time hledger code with Criterion. https://github.com/simonmichael/hledger/pull/1330#issuecomment-684075129 has some ideas on this. - Query no longer has a custom Show instance - Hledger.Utils.String: quoteIfNeeded now actually escapes quotes in strings. escapeQuotes was dropped. (Stephen Morgan) - Hledger.Utils.Tree: dropped some old utilities - Some fromIntegral calls have been replaced with safer code, removing some potential for integer wrapping bugs (#1325, #1326) - Parsing numbers with more than 255 decimal places now gives an error instead of silently misparsing (#1326) - Digit groups are now limited to at most 255 digits each. (#1326) - Exponents are parsed as Integer rather than Int. This means exponents greater than 9223372036854775807 or less than -9223372036854775808 are now parsed correctly, in theory. (In practice, very large exponents will cause hledger to eat all your memory, so avoid them for now.) (#1326) - AmountStyle's asprecision is now a sum type with Word8, instead of an Int with magic values. - DigitGroupStyle uses Word8 instead of Int. - Partial helper function parsedate has been dropped, use fromGregorian instead. 
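For example, code that previously built a `Day` with the partial `parsedate` can construct it directly (a minimal sketch using the standard `time` API; `reportStart` is a hypothetical name):

```haskell
import Data.Time.Calendar (Day, fromGregorian)

reportStart :: Day
reportStart = fromGregorian 2020 1 1  -- was: parsedate "2020/01/01"
```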
- Partial helper function mkdatespan has been dropped.
- Helper function transaction now takes a Day instead of a date string. (Stephen Morgan)
- Old CPP directives made redundant by version bounds have been removed. (Stephen Morgan)
- Smart dates are now represented by the SmartDate type, and are always well formed. (Stephen Morgan)
- accountTransactionsReport (used for hledger aregister and hledger-ui/hledger-web registers) now filters transactions more thoroughly, so eg transactions dated outside the report period will not be shown. Previously the transaction would be shown if it had any posting dated inside the report period. Possibly some other filter criteria now get applied that didn't before. I think on balance this will give slightly preferable results.
- The old BalanceReport code has been dropped at last, replaced by MultiBalanceReport so that all balance reports now use the same code. (Stephen Morgan, #1256)
- The large multiBalanceReport function has been split up and refactored extensively.
- Tabular data formerly represented as [[MixedAmount]] is now HashMap AccountName (Map DateSpan Account). Reports with many columns are now faster.
- Calculating starting balances no longer calls the whole balanceReport, just the first few functions.
- displayedAccounts is completely rewritten. Perhaps one subtle thing to note is that in tree mode it no longer excludes nodes with zero inclusive balance unless they also have zero exclusive balance.
- Simon's note: "I'll mark the passing of the old multiBalanceReport, into which I poured many an hour :). It is in a way the heart (brain ?) of hledger - the key feature of ledgerlikes (balance report) and a key improvement introduced by hledger (tabular multiperiod balance reports) ... Thanks @Xitian9, great work."

# 1.18.1 2020-06-21

- fix some doc typos (Martin Michlmayr)

# 1.18 2020-06-07

- added: getHledgerCliOpts', takes an explicit argument list
- added: toJsonText
- changed: isNegativeMixedAmount now gives an answer for multi-commodity amounts which are all negative
- changed: multiBalanceReport now gets the query from ReportOpts (Dmitry Astapov)
- renamed:
   isZeroAmount                -> amountLooksZero
   isReallyZeroAmount          -> amountIsZero
   isZeroMixedAmount           -> mixedAmountLooksZero
   isReallyZeroMixedAmount     -> mixedAmountIsZero
   isReallyZeroMixedAmountCost dropped
- renamed: finaliseJournal -> journalFinalise
- renamed: fixedlotpricep -> lotpricep, now also parses non-fixed lot prices
- dropped: transactionPostingBalances
- dropped: outputflags no longer exported by Hledger.Cli.CliOptions
- fixed: documentation for journalExpenseAccountQuery (Pavan Rikhi)

# 1.17.1 2020-03-19

- require newer Decimal, math-functions libs to ensure consistent rounding behaviour, even when built with old GHCs/snapshots. hledger uses banker's rounding (rounds to nearest even number, eg 0.5 displayed with zero decimal places is "0").
- added: debug helpers traceAt, traceAtWith
- Journal is now a Semigroup, not a Monoid (since <> is right-biased). (Stephen Morgan)

# 1.17.0.1 2020-03-01

- fix org heading comments and doctest setup comment that were breaking haddock (and in some cases, installation)

# 1.17 2020-03-01

- Reader-finding utilities have moved from Hledger.Read to Hledger.Read.JournalReader so the include directive can use them.
- Reader changes:
  - rExperimental flag removed
  - old rParser renamed to rReadFn
  - new rParser field provides the actual parser. This seems to require making Reader a higher-kinded type, unfortunately.
- Text.Tabular.AsciiWide now renders smoother outer borders in pretty (unicode) mode. Also, a fix for table edges always using single-width intersections, and support for double horizontal lines with single vertical lines. (Eric Mertens)
- Hledger.Utils.Parse: restofline can go to eof also
- Hledger.Read cleanup
- Hledger.Read.CsvReader cleanup. Exports added: CsvRecord, CsvValue, csvFileFor. Exports removed: expandIncludes, parseAndValidateCsvRules, transactionFromCsvRecord
- more cleanup of amount canonicalisation helpers (#1187). Stop exporting journalAmounts, overJournalAmounts, traverseJournalAmounts. Rename the journalAmounts helper to journalStyleInfluencingAmounts.
- export mapMixedAmount
- Don't store the leaf name in PeriodReport; calculate it at the point of consumption instead. (Stephen Morgan)
- Generalise PeriodicReport to be polymorphic in the account labels. (Stephen Morgan)
- Use records instead of tuples in PeriodicReport. (Stephen Morgan)
- Use PeriodicReport in place of MultiBalanceReport. (Stephen Morgan)
- Calculate MultiReportBalance columns more efficiently: only calculate the posting date once for each posting, and calculate their columns instead of checking each DateSpan separately. (Stephen Morgan)
- Moved JSON instances from hledger-web to hledger-lib (Hledger.Data.Json), and added ToJSON instances for all (?) remaining data types, up to Ledger.
- Dropped nullassertion's "assertion" alias, fixing a warning. Perhaps we'll stick with the null* naming convention.

# 1.16.2 2020-01-14

- add support for megaparsec 8 (#1175)

# 1.16.1 2019-12-03

- Drop unnecessary mtl-compat dependency
- Fix building with GHC 8.0, 8.2

# 1.16 2019-12-01

- drop support for GHC 7.10, due to MonadFail hassles in JournalReader.hs
- add support for GHC 8.8, base-compat 0.11 (#1090). We are now using the new fail from the MonadFail class, which we always import qualified as Fail.fail, from base-compat-batteries Control.Monad.Fail.Compat to work with old GHC versions. If the old fail is needed (it shouldn't be), it should be imported qualified as Prelude.Fail, using imports such as:

      import Prelude hiding (fail)
      import qualified Prelude (fail)
      import Control.Monad.State.Strict hiding (fail)
      import "base-compat-batteries" Prelude.Compat hiding (fail)
      import qualified "base-compat-batteries" Control.Monad.Fail.Compat as Fail

- hledger and hledger-lib unit tests have been ported to tasty (a minimal sketch of the style follows below).
- The doctest suite has been disabled for now since it doesn't run well with cabal (#1139)

# 1.15.2 2019-09-05

Changes:

- postingApplyValuation, mixedAmountApplyValuation, amountApplyValuation take an argument, the report end date if one was specified.
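Following up on the tasty note under 1.16 above: here is a minimal, hypothetical sketch of a tasty-style test tree. The group and case names are invented for illustration and are not hledger-lib's actual tests; it assumes only the tasty and tasty-hunit packages.

```haskell
import Test.Tasty (TestTree, defaultMain, testGroup)
import Test.Tasty.HUnit (testCase, (@?=))

-- A made-up test tree in the tasty style; hledger-lib combines similar
-- groups of unit tests with testGroup and runs them via its test suites.
tests :: TestTree
tests = testGroup "Example"
  [ testCase "addition" $ (1 + 1 :: Int) @?= 2
  , testGroup "strings"
      [ testCase "concatenation" $ ("foo" ++ "bar") @?= "foobar" ]
  ]

main :: IO ()
main = defaultMain tests
```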
# 1.15.1 2019-09-02

- fix failing doctests

# 1.15 2019-09-01

Removals include:

- journalPrices
- BalanceHistoryReport
- postingValueAtDate

Additions include:

- MarketPrice (more pure form of PriceDirective without the amount style information)
- PriceOracle (efficient lookup of exchange rates)
- ValuationType (ways to convert amount value)
- aliasnamep (export)
- setNaturalPrecisionUpTo
- dbgNWith, ptraceAtWith
- postingTransformAmount, postingToCost, postingValue
- amountToCost, mixedAmountToCost
- valueTypeFromOpts
- mapJournalTransactions, mapJournalPostings, mapTransactionPostings
- journalStartDate, journalEndDate
- journalPriceOracle
- marketPriceReverse
- priceDirectiveToMarketPrice
- mixedAmountApplyValuation
- mixedAmountValueAtDate

Changes include:

- Price -> AmountPrice, AKA "transaction price"
- old MarketPrice -> PriceDirective
- TransactionsReport/AccountTransactionsReport split into separate files
- journalTransactionsReport -> transactionsReport
- accountTransactionsReportItems: rewrite using catMaybes and mapAccumL (Henning Thielemann)
- optionally save the current date in ReportOpts
- Hledger.Cli tests now have correct prefix; add Cli.Utils tests
- MultiBalanceReport now returns zero for row totals when in cumulative or historical mode (#329)

# 1.14.1 2019-03-20

- require easytest <0.3 to fix build issue

# 1.14 2019-03-01

- added: transaction, [v]post*, balassert* constructors, for tests etc.
- renamed: porigin -> poriginal
- refactored: transaction balancing & balance assertion checking (#438)

# 1.13.1 (2019/02/02)

- stop depending on here to avoid haskell-src-meta/stackage blockage.

# 1.13 (2019/02/01)

- in Journal's jtxns field, forecasted txns are appended rather than prepended
- API changes:

  added:
  +setFullPrecision
  +setMinimalPrecision
  +expectParseStateOn
  +embedFileRelative
  +hereFileRelative

  changed:
  - amultiplier -> aismultiplier
  - Amount fields reordered for clearer debug output
  - tpreceding_comment_lines -> tprecedingcomment, reordered
  - Hledger.Data.TransactionModifier.transactionModifierToFunction -> modifyTransactions
  - Hledger.Read.Common.applyTransactionModifiers -> Hledger.Data.Journal.journalModifyTransactions
  - HelpTemplate -> CommandDoc

# 1.12 (2018/12/02)

- switch to megaparsec 7 (Alex Chen). We now track the stack of include files in Journal ourselves, since megaparsec dropped this feature.
- add 'ExceptT' layer to our parser monad again (Alex Chen). We previously had a parser type, 'type ErroringJournalParser = ExceptT String ...', for throwing parse errors without allowing further backtracking. This parser type was removed under the assumption that it would be possible to write our parser without this capability. However, after a hairy backtracking bug, we would now prefer to have the option to prevent backtracking.
  - Define a 'FinalParseError' type specifically for the 'ExceptT' layer
  - Any parse error can be raised as a "final" parse error
  - Tracks the stack of include files for parser errors, anticipating the removal of the tracking of stacks of include files in megaparsec 7
  - Although a stack of include files is also tracked in the 'StateT Journal' layer of the parser, it seems easier to guarantee correct error messages in the 'ExceptT FinalParseError' layer
  - This does not make the 'StateT Journal' stack redundant, because the 'ExceptT FinalParseError' stack cannot be used to detect cycles of include files
- more support for location-aware parse errors when re-parsing (Alex Chen)
- make 'includedirectivep' an 'ErroringJournalParser' (Alex Chen)
- drop the Ord instance breaking the GHC 8.6 build (Peter Simons)
- flip the arguments of `(divide|multiply)[Mixed]Amount`
- showTransaction: fix a case showing multiple missing amounts. showTransaction could sometimes hide the last posting's amount even if one of the other posting amounts was already implicit, producing invalid transaction output.
- plog, plogAt: add missing newline
- split up journalFinalise, reorder journal finalisation steps (#893) (Jesse Rosenthal). The `journalFinalise` function has been split up, allowing more granular control.
- journalSetTime --> journalSetLastReadTime
- journalSetFilePath has been removed; use journalAddFile instead

# 1.11.1 (2018/10/06)

- add, lib: fix wrong transaction rendering in balance assertion errors and when using the add command

# 1.11 (2018/9/30)

- compilation now works when the locale is unset (#849)
- all unit tests have been converted from HUnit+test-framework to easytest
- doctests now run quicker by default, by skipping reloading between tests. This can be disabled by passing --slow to the doctests test suite executable.
- the doctests test suite executable now supports --verbose, which shows progress output as tests are run if doctest 0.16.0+ is installed (and hopefully is harmless otherwise).
- doctests now support file pattern arguments and provide more informative output. Limiting to just the file(s) you're interested in can make doctest start much quicker. With one big caveat: you can limit the starting files, but it always imports and tests all other local files those import.
- a bunch of custom Show instances have been replaced with defaults, for easier troubleshooting. These were sometimes obscuring important details, eg in test failure output. Our new policy is: stick with default derived Show instances as far as possible, but when necessary adjust them to valid haskell syntax so pretty-show can pretty-print them (eg when they contain Day values, cf https://github.com/haskell/time/issues/101). By convention, when fields are shown in less than full detail, and/or in double-quoted pseudo syntax, we show a double period (..) in the output.
- Amount has a new Show instance. Amount's show instance hid important details by default, and showing more details required increasing the debug level, which was inconvenient. Now it has a single show instance which shows more information, is fairly compact, and is pretty-printable.

      ghci> usd 1
      OLD: Amount {acommodity="$", aquantity=1.00, ..}
      NEW: Amount {acommodity = "$", aquantity = 1.00, aprice = NoPrice, astyle = AmountStyle "L False 2 Just '.' Nothing..", amultiplier = False}
  MixedAmount's show instance is unchanged, but showMixedAmountDebug is affected by this change:

      ghci> putStrLn $ showMixedAmountDebug $ Mixed [usd 1]
      OLD: Mixed [Amount {acommodity="$", aquantity=1.00, aprice=, astyle=AmountStyle {ascommodityside = L, ascommodityspaced = False, asprecision = 2, asdecimalpoint = Just '.', asdigitgroups = Nothing}}]
      NEW: Mixed [Amount {acommodity="$", aquantity=1.00, aprice=, astyle=AmountStyle "L False 2 Just '.' Nothing.."}]

- Same-line & next-line comments of transactions, postings, etc. are now parsed a bit more precisely (followingcommentp). Previously, parsing no comment gave the same result as an empty comment (a single newline); now it gives an empty string. Also, and perhaps as a consequence of the above, when there's no same-line comment but there is a next-line comment, we'll insert an empty first line, since otherwise next-line comments would get moved up to the same line when rendered.
- Hledger.Utils.Test exports HasCallStack
- queryDateSpan, queryDateSpan' now intersect date AND'ed date spans instead of unioning them, and docs are clearer.
- pushAccount -> pushDeclaredAccount
- jaccounts -> jdeclaredaccounts
- AutoTransaction.hs -> PeriodicTransaction.hs & TransactionModifier.hs
- Hledger.Utils.Debug helpers have been renamed/cleaned up

# 1.10 (2018/6/30)

- build cleanly with all supported GHC versions again (7.10 to 8.4)
- support/use latest base-compat (#794)
- support/require megaparsec 6.4+
- extensive refactoring and cleanup of parsers and related types and utilities
- readJournalFile(s) cleanup, these now use InputOpts
- doctests now run a bit faster (#802)

# 1.9.1 (2018/4/30)

- new generic PeriodicReport, and some report-related type aliases
- new BudgetReport
- make `(readJournal|tryReader)s?WithOpts` the default api, dropping "WithOpts"
- automated postings and command line account aliases happen earlier in journal processing (see hledger changelog)

# 1.9 (2018/3/31)

- support ghc 8.4, latest deps
- when the system text encoding is UTF-8, ignore any UTF-8 BOM prefix found when reading files.
- CompoundBalanceReport amounts are now normally positive. The bs/bse/cf/is commands now show normal income, liability and equity balances as positive. Negative numbers now indicate a contra-balance (eg an overdrawn checking account), a net loss, a negative net worth, etc. This makes these reports more like conventional financial statements, and easier to read and share with others. (experimental)
- splitSpan now returns no spans for an empty datespan
- don't count periodic/modifier txns in Journal debug output
- lib/ui/web/api: move embedded manual files to extra-source-files
- Use skipMany/skipSome for parsing spacenonewline (Moritz Kiefer). This avoids allocating the list of space characters only to then discard it.
- rename, clarify purpose of balanceReportFromMultiBalanceReport
- fix some hlint warnings
- add some easytest tests

# 1.5 (2017/12/31)

- -V/--value uses today's market prices by default, not those of last transaction date.
  (#683, #648)
- csv: allow balance assignment (balance assertion only, no amount) in csv records (Nadrieril)
- journal: allow space as digit group separator character, #330 (Mykola Orliuk)
- journal: balance assertion errors now show line of failed assertion posting, #481 (Sam Jeeves)
- journal: better errors for directives, #402 (Mykola Orliuk)
- journal: better errors for included files, #660 (Mykola Orliuk)
- journal: commodity directives in parent files are inherited by included files, #487 (Mykola Orliuk)
- journal: commodity directives limit precision even after -B, #509 (Mykola Orliuk)
- journal: decimal point/digit group separator chars are now inferred from an applicable commodity directive or default commodity directive. #399, #487 (Mykola Orliuk)
- journal: numbers are parsed more strictly (Mykola Orliuk)
- journal: support Ledger-style automated postings, enabled with --auto flag (Dmitry Astapov)
- journal: support Ledger-style periodic transactions, enabled with --forecast flag (Dmitry Astapov)
- period expressions: fix "nth day of {week,month}", which could generate wrong intervals (Dmitry Astapov)
- period expressions: month names are now case-insensitive (Dmitry Astapov)
- period expressions: stricter checking for invalid expressions (Mykola Orliuk)
- period expressions: support "every 11th Nov" (Dmitry Astapov)
- period expressions: support "every 2nd Thursday of month" (Dmitry Astapov)
- period expressions: support "every Tuesday", short for "every <n>th day of week" (Dmitry Astapov)
- remove upper bounds on all but hledger* and base (experimental). It's rare that my deps break their api or that newer versions must be avoided, and very common that they release new versions which I must tediously and promptly test and release hackage revisions for, or risk falling out of stackage. Trying it this way for a bit.

# 1.4 (2017/9/30)

- add readJournalFile[s]WithOpts, with simpler arguments and support for detecting new transactions since the last read.
- query: add payee: and note: query terms, improve description/payee/note docs (Jakub Zárybnický, Simon Michael, #598, #608)
- journal, cli: make trailing whitespace significant in regex account aliases. Trailing whitespace in the replacement part of a regular expression account alias is now significant. Eg, converting a parent account to just an account name prefix: --alias '/:acct:/=:acct'
- timedot: allow a quantity of seconds, minutes, days, weeks, months or years to be logged as Ns, Nm, Nd, Nw, Nmo, Ny
- csv: switch the order of generated postings, so account1 is first. This simplifies things and facilitates future improvements.
- csv: show the "creating/using rules file" message only with --debug
- csv: fix multiple includes in one rules file
- csv: add "newest-first" rule for more robust same-day ordering
- deps: allow ansi-terminal 0.7
- deps: add missing parsec lower bound, possibly related to #596, fpco/stackage#2835
- deps: drop oldtime flag, require time 1.5+
- deps: remove ghc < 7.6 support, remove obsolete CPP conditionals
- deps: fix test suite with ghc 8.2

# 1.3.1 (2017/8/25)

- Fix a bug with -H showing nothing for empty periods (#583, Nicholas Niro). This patch fixes a bug that happened when using the -H option on a period without any transaction. Previously, the behavior was no output at all even though it should have shown the previous ending balances of past transactions.
  (This is similar to previously using -H with -E, but with the extra advantage of not showing empty accounts.)
- allow megaparsec 6 (#594)
- allow megaparsec-6.1 (Hans-Peter Deifel)
- fix test suite with Cabal 2 (#596)

# 1.3 (2017/6/30)

journal: The "uncleared" transaction/posting status, and associated UI flags and keys, have been renamed to "unmarked" to remove ambiguity and confusion. This means that we have dropped the `--uncleared` flag, and our `-U` flag now matches only unmarked things and not pending ones. See the issue and linked mailing list discussion for more background. (#564)

csv: assigning to the "balance" field name creates balance assertions (#537, Dmitry Astapov).

csv: Doubled minus signs are handled more robustly (fixes #524, Nicolas Wavrant, Simon Michael)

Multiple "status:" query terms are now OR'd together. (#564)

deps: allow megaparsec 5.3.

# 1.2 (2017/3/31)

## journal format

A pipe character can optionally be used to delimit payee names in transaction descriptions, for more accurate querying and pivoting by payee. Eg, for a description like `payee name | additional notes`, the two parts will be accessible as pseudo-fields/tags named `payee` and `note`.

Some journal parse errors now show the range of lines involved, not just the first.

## ledger format

The experimental `ledger:` reader based on the WIP ledger4 project has been disabled, reducing build dependencies.

## Misc

Fix a bug when tying the knot between postings and their parent transaction, reducing memory usage by about 10% (#483) (Mykola Orliuk)

Fix a few spaceleaks (#413) (Moritz Kiefer)

Add Ledger.Parse.Text to package.yaml, fixing a potential build failure.

Allow megaparsec 5.2 (#503)

Rename optserror -> usageError, consolidate with other error functions

# 1.1 (2016/12/31)

## journal format

- balance assignments are now supported (#438, #129, #157, #288). This feature also brings a slight performance drop (~5%); optimisations welcome.
- also recognise `*.hledger` files as hledger journal format

## ledger format

- use ledger-parse from the ledger4 project as an alternate reader for C++ Ledger journals. The idea is that some day we might get better compatibility with Ledger files this way. Right now this reader is not very useful and will be used only if you explicitly select it with a `ledger:` prefix. It parses transaction dates, descriptions, accounts and amounts, and ignores everything else. Amount parsing is delegated to hledger's journal parser, and malformed amounts might be silently ignored. This adds at least some of the following as new dependencies for hledger-lib: parsers, parsec, attoparsec, trifecta.

## misc

- update base lower bound to enforce GHC 7.10+. hledger-lib had a valid install plan with GHC 7.8, but currently requires GHC 7.10 to compile. Now we require base 4.8+ everywhere to ensure the right GHC version at the start.
- Hledger.Read api cleanups
- rename dbgIO to dbg0IO, consistent with dbg0, and document a bug in dbg*IO
- make readJournalFiles [f] equivalent to readJournalFile f (#437)
- more general parser types enabling reuse outside of IO (#439)

# 1.0.1 (2016/10/27)

- allow megaparsec 5.0 or 5.1

# 1.0 (2016/10/26)

## timedot format

- new "timedot" format for retroactive/approximate time logging. Timedot is a plain text format for logging dated, categorised quantities (eg time), supported by hledger. It is convenient for approximate and retroactive time logging, eg when the real-time clock-in/out required with a timeclock file is too precise or too interruptive.
  It can be formatted like a bar chart, making clear at a glance where time was spent.

## timeclock format

- renamed "timelog" format to "timeclock", matching the emacs package
- sessions can no longer span file boundaries (unclocked-out sessions will be auto-closed at the end of the file).
- transaction ids now count up rather than down (#394)
- timeclock files no longer support default year directives
- removed old code for appending timeclock transactions to journal transactions, a holdover from the days when both were allowed in one file.

## csv format

- fix empty field assignment parsing, rule parse errors after megaparsec port (#407) (Hans-Peter Deifel)

## journal format

- journal files can now include timeclock or timedot files (#320) (but not yet CSV files).
- fixed an issue with ordering of same-date transactions included from other files
- the "commodity" directive and "format" subdirective are now supported, allowing full control of commodity style (#295). The commodity directive's format subdirective can now be used to override the inferred style for a commodity, eg to increase or decrease the precision. This is at least a good workaround for #295.
- Ledger-style "apply account"/"end apply account" directives are now used to set a default parent account.
- the Ledger-style "account" directive is now accepted (and ignored).
- bracketed posting dates are more robust (#304). Bracketed posting dates were fragile; they worked only if you wrote full 10-character dates. Also some semantics were a bit unclear. Now they should be robust, and have been documented more clearly. This is a legacy undocumented Ledger syntax, but it improves compatibility and might be preferable to the more verbose "date:" tags if you write posting dates often (as I do). Internally, bracketed posting dates are no longer considered to be tags. Journal comment, tag, and posting date parsers have been reworked, all with doctests.
- balance assertion failure messages are clearer
- with --debug=2, more detail about balance assertions is shown.

## misc

- file parsers have been ported from Parsec to Megaparsec \o/ (#289, #366) (Alexey Shmalko, Moritz Kiefer)
- most hledger types have been converted from String to Text, reducing memory usage by 30%+ on large files
- file parsers have been simplified for easier troubleshooting (#275). The journal/timeclock/timedot parsers, instead of constructing opaque journal update functions which are later applied to build the journal, now construct the journal directly by modifying the parser state. This is easier to understand and debug. It also rules out the possibility of journal updates being a space leak. (They weren't; in fact this change increased memory usage slightly, but that has been addressed in other ways.) The ParsedJournal type alias has been added to distinguish "being-parsed" journals and "finalised" journals.
- file format detection is more robust. The Journal, Timelog and Timedot readers' detectors now check each line in the sample data, not just the first one. I think the sample data is only about 30 chars right now, but even so this fixed a format detection issue I was seeing. Also, we now always try parsing stdin as journal format (not just sometimes).
- all file formats now produce transaction ids, not just journal (#394)
- git clone of the hledger repo on windows now works (#345)
- added missing benchmark file (#342)
- our stack.yaml files are more compatible across stack versions (#300)
- use newer file-embed to fix ghci working directory dependence ()
- report more accurate dates in account transaction report when postings have their own dates (affects hledger-ui and hledger-web registers). The newly-named "transaction register date" is the date to be displayed for that transaction in a transaction register, for some current account and filter query. It is either the transaction date from the journal ("transaction general date"), or, if postings to the current account and matched by the register's filter query have their own dates, the earliest of those posting dates.
- simplify account transactions report's running total. The account transactions report used for hledger-ui and -web registers now gives either the "period total" or "historical total", depending strictly on the --historical flag. It doesn't try to indicate whether the historical total is the accurate historical balance (which depends on the user's report query).
- reloading a file now preserves the effect of options, query arguments etc.
- reloading a journal should now reload all included files as well.
- the Hledger.Read.* modules have been reorganised for better reuse. Hledger.Read.Utils has been renamed Hledger.Read.Common and holds low-level parsers & utilities; high-level read utilities are now in Hledger.Read.
- clarify amount display style canonicalisation code and terminology a bit. Individual amounts still have styles; from these we derive the standard "commodity styles". In user docs, we might call these "commodity formats" since they can be controlled by the "format" subdirective in journal files.
- Journal is now a monoid
- expandPath now throws a proper IO error
- more unit tests, start using doctest

0.27 (2015/10/30)

- The main hledger types now derive NFData, which makes it easier to time things with criterion.
- Utils has been split up more.
- Utils.Regex: regular expression compilation has been memoized, and memoizing versions of regexReplace[CI] have been added, since compiling regular expressions every time seems to be quite expensive (#244).
- Utils.String: strWidth is now aware of multi-line strings (#242) (see the sketch below).
- Read: parsers now use a consistent p suffix.
- New dependencies: deepseq, uglymemo.
- All the hledger packages' cabal files are now generated from simpler, less redundant yaml files by hpack, in principle. In practice, manual fixups are still needed until hpack gets better, but it's still a win.
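A hedged illustration of the strWidth change noted above. It assumes strWidth is still exported from Hledger.Utils.String with type String -> Int, and the expected values in the comments are inferred from the description rather than verified output.

```haskell
import Hledger.Utils.String (strWidth)

-- With multi-line awareness, a multi-line string's width should be the width
-- of its widest line rather than a count over the whole string (an assumption
-- based on the changelog entry above).
main :: IO ()
main = do
  print (strWidth "hello")            -- expected: 5
  print (strWidth "ab\nlonger line")  -- expected: 11 (the widest line)
```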
0.26 (2015/7/12)

- allow year parser to handle arbitrarily large years
- Journal's Show instance reported one too many accounts
- some cleanup of debug trace helpers
- tighten up some date and account name parsers (don't accept leading spaces; haddocks)
- drop regexpr dependency

0.25.1 (2015/4/29)

- support/require base-compat >0.8 (#245)

0.25 (2015/4/7)

- GHC 7.10 compatibility (#239)

0.24.1 (2015/3/15)

- fix JournalReader "ctx" compilation warning
- add some type signatures in Utils to help make ghci-web

0.24 (2014/12/25)

- fix combineJournalUpdates folding order
- fix a regexReplaceCI bug
- fix a splitAtElement bug with adjacent separators
- mostly replace slow regexpr with regex-tdfa (fixes #189)
- use the modern Text.Parsec API
- allow transformers 0.4*
- regexReplace now supports backreferences
- Transactions now remember their parse location in the journal file
- export Regexp types, disambiguate CsvReader's similarly-named type
- export failIfInvalidMonth/Day (fixes #216)
- track the commodity of zero amounts when possible (useful eg for hledger-web's multi-commodity charts)
- show posting dates in debug output
- more debug helpers

0.23.3 (2014/9/12)

- allow transformers 0.4*

0.23.2 (2014/5/8)

- postingsReport: also fix date sorting of displayed postings (#184)

0.23.1 (2014/5/7)

- postingsReport: with disordered journal entries, postings before the report start date could get wrongly included. (#184)

0.23 (2014/5/1)

- orDatesFrom -> spanDefaultsFrom

0.22.2 (2014/4/16)

- display years before 1000 with four digits, not three
- avoid pretty-show to build with GHC < 7.4
- allow text 1.1, drop data-pprint to build with GHC 7.8.x

0.22.1 (2014/1/6) and older: see http://hledger.org/release-notes or doc/CHANGES.md.

hledger-lib-1.30/README.md0000644000000000000000000000045714434445206013274 0ustar0000000000000000
# hledger-lib

A reusable library containing hledger's core functionality.

This is used by most hledger* packages so that they support the same common file formats, command line options, reports etc.

See also: the [project README](https://hledger.org/README.html) and [home page](https://hledger.org).

hledger-lib-1.30/LICENSE0000644000000000000000000010451313302271455013014 0ustar0000000000000000
GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. 
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program.
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.

hledger-lib-1.30/Setup.hs0000644000000000000000000000005613302271455013440 0ustar0000000000000000import Distribution.Simple
main = defaultMain

hledger-lib-1.30/hledger-lib.cabal0000644000000000000000000001603614436245522015160 0ustar0000000000000000cabal-version: 1.12

-- This file has been generated from package.yaml by hpack version 0.35.1.
--
-- see: https://github.com/sol/hpack

name: hledger-lib
version: 1.30
synopsis: A reusable library providing the core functionality of hledger
description:
  A reusable library containing hledger's core functionality.
  This is used by most hledger* packages so that they support the same common file formats, command line options, reports etc.
  .
  hledger is a robust, cross-platform set of tools for tracking money, time, or any other commodity, using double-entry accounting and a simple, editable file format, with command-line, terminal and web interfaces. It is a Haskell rewrite of Ledger, and one of the leading implementations of Plain Text Accounting.
  Read more at: <http://hledger.org>
category: Finance
stability: stable
homepage: http://hledger.org
bug-reports: http://bugs.hledger.org
author: Simon Michael
maintainer: Simon Michael
license: GPL-3
license-file: LICENSE
build-type: Simple
tested-with: GHC==8.10.7, GHC==9.0.2, GHC==9.2.7, GHC==9.4.4
extra-source-files:
  CHANGES.md
  README.md
  test/unittest.hs
  test/doctests.hs

source-repository head
  type: git
  location: https://github.com/simonmichael/hledger

library
  exposed-modules:
    Hledger
    Hledger.Data
    Hledger.Data.Account
    Hledger.Data.AccountName
    Hledger.Data.Amount
    Hledger.Data.Balancing
    Hledger.Data.Dates
    Hledger.Data.Errors
    Hledger.Data.Journal
    Hledger.Data.JournalChecks
    Hledger.Data.JournalChecks.Ordereddates
    Hledger.Data.JournalChecks.Uniqueleafnames
    Hledger.Data.Json
    Hledger.Data.Ledger
    Hledger.Data.Period
    Hledger.Data.PeriodicTransaction
    Hledger.Data.StringFormat
    Hledger.Data.Posting
    Hledger.Data.RawOptions
    Hledger.Data.Timeclock
    Hledger.Data.Transaction
    Hledger.Data.TransactionModifier
    Hledger.Data.Types
    Hledger.Data.Valuation
    Hledger.Query
    Hledger.Read
    Hledger.Read.Common
    Hledger.Read.CsvReader
    Hledger.Read.CsvUtils
    Hledger.Read.InputOptions
    Hledger.Read.JournalReader
    Hledger.Read.RulesReader
    Hledger.Read.TimedotReader
    Hledger.Read.TimeclockReader
    Hledger.Reports
    Hledger.Reports.ReportOptions
    Hledger.Reports.ReportTypes
    Hledger.Reports.AccountTransactionsReport
    Hledger.Reports.BalanceReport
    Hledger.Reports.BudgetReport
    Hledger.Reports.EntriesReport
    Hledger.Reports.MultiBalanceReport
    Hledger.Reports.PostingsReport
    Hledger.Utils
    Hledger.Utils.Debug
    Hledger.Utils.IO
    Hledger.Utils.Parse
    Hledger.Utils.Regex
    Hledger.Utils.String
    Hledger.Utils.Test
    Hledger.Utils.Text
    Text.Tabular.AsciiWide
  other-modules:
    Text.Megaparsec.Custom
    Text.WideString
    Paths_hledger_lib
  hs-source-dirs: ./
  ghc-options: -Wall -Wno-incomplete-uni-patterns -Wno-missing-signatures -Wno-orphans -Wno-type-defaults -Wno-unused-do-bind
  build-depends:
    Decimal >=0.5.1, Glob >=0.9, aeson >=1, aeson-pretty, ansi-terminal >=0.9, array,
    base >=4.14 && <4.19, base-compat, blaze-markup >=0.5.1, bytestring, call-stack,
    cassava, cassava-megaparsec, cmdargs >=0.10, colour >=2.3.6, containers >=0.5.9,
    data-default >=0.5, deepseq, directory, doclayout >=0.3 && <0.5, extra >=1.6.3,
    file-embed >=0.0.10, filepath, hashtables >=1.2.3.1, megaparsec >=7.0.0 && <9.4,
    microlens >=0.4, microlens-th >=0.4, mtl >=2.2.1, parser-combinators >=0.4.0,
    pretty-simple >4 && <5, regex-tdfa, safe >=0.3.19, tabular >=0.2, tasty >=1.2.3,
    tasty-hunit >=0.10.0.2, template-haskell, terminal-size >=0.3.3, text >=1.2,
    text-ansi >=0.2.1, time >=1.5, timeit, transformers >=0.2, uglymemo,
    unordered-containers >=0.2, utf8-string >=0.3.5
  default-language: Haskell2010
  if (!(os(windows)))
    build-depends: pager >=0.1.1.0

test-suite doctest
  type: exitcode-stdio-1.0
  main-is: doctests.hs
  hs-source-dirs: ./ test
  ghc-options: -Wall -Wno-incomplete-uni-patterns -Wno-missing-signatures -Wno-orphans -Wno-type-defaults -Wno-unused-do-bind
  build-depends:
    Decimal >=0.5.1, Glob >=0.7, aeson >=1, aeson-pretty, ansi-terminal >=0.9, array,
    base >=4.14 && <4.19, base-compat, blaze-markup >=0.5.1, bytestring, call-stack,
    cassava, cassava-megaparsec, cmdargs >=0.10, colour >=2.3.6, containers >=0.5.9,
    data-default >=0.5, deepseq, directory, doclayout >=0.3 && <0.5, doctest >=0.18.1,
    extra >=1.6.3, file-embed >=0.0.10, filepath, hashtables >=1.2.3.1,
    megaparsec >=7.0.0 && <9.4, microlens >=0.4, microlens-th >=0.4, mtl >=2.2.1,
    parser-combinators >=0.4.0, pretty-simple >4 && <5, regex-tdfa, safe >=0.3.19,
    tabular >=0.2, tasty >=1.2.3, tasty-hunit >=0.10.0.2, template-haskell,
    terminal-size >=0.3.3, text >=1.2, text-ansi >=0.2.1, time >=1.5, timeit,
    transformers >=0.2, uglymemo, unordered-containers >=0.2, utf8-string >=0.3.5
  default-language: Haskell2010
  if (!(os(windows)))
    build-depends: pager >=0.1.1.0
  if impl(ghc >= 9.0) && impl(ghc < 9.2)
    buildable: False

test-suite unittest
  type: exitcode-stdio-1.0
  main-is: unittest.hs
  hs-source-dirs: ./ test
  ghc-options: -Wall -Wno-incomplete-uni-patterns -Wno-missing-signatures -Wno-orphans -Wno-type-defaults -Wno-unused-do-bind
  build-depends:
    Decimal >=0.5.1, Glob >=0.9, aeson >=1, aeson-pretty, ansi-terminal >=0.9, array,
    base >=4.14 && <4.19, base-compat, blaze-markup >=0.5.1, bytestring, call-stack,
    cassava, cassava-megaparsec, cmdargs >=0.10, colour >=2.3.6, containers >=0.5.9,
    data-default >=0.5, deepseq, directory, doclayout >=0.3 && <0.5, extra >=1.6.3,
    file-embed >=0.0.10, filepath, hashtables >=1.2.3.1, hledger-lib,
    megaparsec >=7.0.0 && <9.4, microlens >=0.4, microlens-th >=0.4, mtl >=2.2.1,
    parser-combinators >=0.4.0, pretty-simple >4 && <5, regex-tdfa, safe >=0.3.19,
    tabular >=0.2, tasty >=1.2.3, tasty-hunit >=0.10.0.2, template-haskell,
    terminal-size >=0.3.3, text >=1.2, text-ansi >=0.2.1, time >=1.5, timeit,
    transformers >=0.2, uglymemo, unordered-containers >=0.2, utf8-string >=0.3.5
  buildable: True
  default-language: Haskell2010
  if (!(os(windows)))
    build-depends: pager >=0.1.1.0
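
The library stanza above is the package's public surface: a downstream program adds hledger-lib to its own build-depends (as the unittest stanza here does) and imports the umbrella Hledger module. A minimal sketch of such a program follows; it is not part of this package, and it assumes the readJournal' helper and the jtxns record field exported via Hledger behave as their names suggest.

{-# LANGUAGE OverloadedStrings #-}
-- Hypothetical example client of hledger-lib: parse a small journal given
-- as inline text and report how many transactions it contains.
-- Assumption: readJournal' :: Text -> IO Journal (parse with default input
-- options, erroring on failure) and jtxns :: Journal -> [Transaction] are
-- re-exported by the Hledger module, as suggested by the module list above.
import Hledger

main :: IO ()
main = do
  -- The second posting has no amount, so hledger infers the balancing -$100.
  j <- readJournal' "2023-01-01 opening balances\n  assets:cash    $100\n  equity:opening\n"
  putStrLn $ "parsed " ++ show (length (jtxns j)) ++ " transaction(s)"

Built against hledger-lib, this would print "parsed 1 transaction(s)"; the same imports give access to the report and query modules listed in exposed-modules for anything beyond counting entries.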